Compare commits
1105 Commits
Author | SHA1 | Date |
---|---|---|
mindspore-ci-bot | 1e84d77969 | |
mindspore-ci-bot | b869325005 | |
yuzhenhua | 25770b2680 | |
mindspore-ci-bot | ea011a678c | |
changzherui | 9acd80d0d3 | |
mindspore-ci-bot | 00d199de25 | |
mindspore-ci-bot | 02a83838c4 | |
liubuyu | 03fa52d213 | |
mindspore-ci-bot | d78b4d8f29 | |
yuzhenhua | e085cee42e | |
chendongsheng | f8c62714a3 | |
mindspore-ci-bot | a2ed03d1e8 | |
mindspore-ci-bot | 08ae993429 | |
mindspore-ci-bot | 4cd3362bb6 | |
dayschan | eb9db6a074 | |
CaoJian | 65f0a3a255 | |
yao_yf | 6e2222a30e | |
wang_shaocong | 8fe60a46d8 | |
mindspore-ci-bot | 46a500294a | |
mindspore-ci-bot | 5158d0fe3c | |
mindspore-ci-bot | 6a9b1491a6 | |
liuxiao93 | f08b56140c | |
zhujingxuan | 140937c63c | |
mindspore-ci-bot | 9039737dd9 | |
mindspore-ci-bot | 4d0dcb371b | |
yuzhenhua | dcfcbdb7fc | |
anyrenwei | f8785aeb53 | |
mindspore-ci-bot | 4b09587d0b | |
mindspore-ci-bot | 26611f0c3a | |
mindspore-ci-bot | bc04567ef5 | |
mindspore-ci-bot | eeaf8d1db0 | |
yeyunpeng2020 | efcdd5ca3f | |
luopengting | 2807144c3b | |
luopengting | b81bdc5d58 | |
shenwei41 | e8150ea2e5 | |
mindspore-ci-bot | 4f4c43ca85 | |
mindspore-ci-bot | 5148bb21dd | |
mindspore-ci-bot | 6c5219a98a | |
wangshuide2020 | af66bc9026 | |
zhanghuiyao | 318187894b | |
mindspore-ci-bot | bd14e9a3a7 | |
mindspore-ci-bot | fc41e6806f | |
mindspore-ci-bot | 46cb019638 | |
mindspore-ci-bot | f9efbf571c | |
anyrenwei | 26016dc9b4 | |
mindspore-ci-bot | 627fbb7137 | |
mindspore-ci-bot | 938d91ea17 | |
mindspore-ci-bot | 606f119288 | |
mindspore-ci-bot | f3155e4bd8 | |
liuxiao93 | 1812bd137f | |
mindspore-ci-bot | d41eaefc90 | |
lilei | 9100992dfe | |
yuzhenhua | 15f43f8e8e | |
mindspore-ci-bot | cc8ae42862 | |
mindspore-ci-bot | b71b0f84e9 | |
lingyunli63 | 014981c550 | |
mindspore-ci-bot | 443d2a2029 | |
mindspore-ci-bot | 210a4a2490 | |
mindspore-ci-bot | 8f4b49fd16 | |
liuhe | f59fbe1cbe | |
mindspore-ci-bot | e4cd704962 | |
dinglinhe | e6a13acb66 | |
jiangshuqiang | 24f6da5984 | |
mindspore-ci-bot | a19f35ab14 | |
xiefangqi | cbd88f8b85 | |
mindspore-ci-bot | c29e6264c9 | |
lingyunli63 | 067a038ae6 | |
liuhe | 95c7648e66 | |
limingqi107 | 6b926e99eb | |
yuzhenhua | c2a7ee495c | |
chujinjin | 195f094c97 | |
mindspore-ci-bot | d5d65732f1 | |
mindspore-ci-bot | 250c3c6b17 | |
mindspore-ci-bot | 8c81ff8722 | |
mindspore-ci-bot | 55e392ed0c | |
qujianwei | ee023b2e02 | |
mindspore-ci-bot | 30757fe1e2 | |
mindspore-ci-bot | 49e601afe9 | |
sunsuodong | 42208998ec | |
dingpeifei | db07f1c63d | |
mindspore-ci-bot | e93cd6df85 | |
buxue | e44aa43fe4 | |
chendongsheng | c58c95b174 | |
mindspore-ci-bot | 5d2bc1cdf6 | |
lizhenyu | 0e0738a3c9 | |
mindspore-ci-bot | 3e32a0361c | |
mindspore-ci-bot | c8366f4474 | |
wangcong | a986c8bd18 | |
mindspore-ci-bot | 3cd99505c9 | |
mindspore-ci-bot | cf711058be | |
mindspore-ci-bot | b2de0c9007 | |
yuzhenhua | 6208c5e47f | |
mindspore-ci-bot | 7a67f28258 | |
mindspore-ci-bot | 205d342f24 | |
zhujingxuan | 04e15f0c1f | |
mindspore-ci-bot | df0d5a99be | |
mindspore-ci-bot | 1977d8e256 | |
mindspore-ci-bot | 5904836ee8 | |
mindspore-ci-bot | 817ff44ef5 | |
liuhe | 241d684835 | |
yao_yf | f1f50dd3d3 | |
mindspore-ci-bot | cbcb4ad5ae | |
mindspore-ci-bot | f0bd06dbed | |
zhaoting | b5260493f0 | |
wanyiming | 42d28fab5f | |
kswang | 4ccf305cc6 | |
chenlei_autodiff | 317ffd2585 | |
mindspore-ci-bot | 5908223718 | |
liuhe | b115ad8762 | |
tronzhang | b0ce27f1fc | |
laiyongqiang | 14d98103a8 | |
Jiaqi | 045474950b | |
shibeiji | cef5565c4b | |
mindspore-ci-bot | 12e8deff54 | |
mindspore-ci-bot | 7bb602a535 | |
mindspore-ci-bot | 7138acaec0 | |
mindspore-ci-bot | 86ac1f2688 | |
mindspore-ci-bot | 68cd837553 | |
mindspore-ci-bot | 2907c2cf97 | |
mindspore-ci-bot | 5995e32c78 | |
mindspore-ci-bot | 98bacbdb0e | |
mindspore-ci-bot | 8072164ceb | |
shenwei41 | ffeb4f7aca | |
mindspore-ci-bot | 323c42a5c0 | |
donghufeng | 0c6841730b | |
mindspore-ci-bot | 37d88db4a4 | |
hwjiaorui | f2c848281a | |
huangbingjian | 2997ec45b7 | |
wangnan39@huawei.com | e86aa7e92e | |
baihuawei | e1dff8c5ba | |
chenfei_mindspore | 0173be9dd2 | |
buxue | 168ee1b76b | |
mindspore-ci-bot | 16488ae427 | |
dinglinhe | bf5f93503b | |
q00596439 | 15ddee9635 | |
lihongkang | 307ad95eb5 | |
mindspore-ci-bot | 2d13cd7a5b | |
mindspore-ci-bot | 0ed87ecb81 | |
mindspore-ci-bot | 5acf3f2858 | |
mindspore-ci-bot | ede3f65d21 | |
mindspore-ci-bot | fd88e3a772 | |
mindspore-ci-bot | d0f0aa5d1e | |
wangdongxu | d0d688697e | |
mindspore-ci-bot | 8b19b69fc7 | |
liuhe | cb7e9c3840 | |
Lixia Chen | 0c00614a3e | |
mindspore-ci-bot | a419ef4dcd | |
caozhou | 3b05201d20 | |
Islam Amin | a6f0e49c2b | |
changzherui | b97b735b7e | |
mindspore-ci-bot | e675c3673f | |
mindspore-ci-bot | dedb75b5f1 | |
mindspore-ci-bot | 3f95838e14 | |
mindspore-ci-bot | 3fddd85b29 | |
mindspore-ci-bot | e161c0a15e | |
mindspore-ci-bot | 58b942024e | |
yanzhenxiang2020 | 1d7457efb5 | |
buxue | bec6c823bd | |
mindspore-ci-bot | af2d0cbb22 | |
mindspore-ci-bot | 593878cbe1 | |
mindspore-ci-bot | 3a3984c422 | |
mindspore-ci-bot | 91df14d78d | |
mindspore-ci-bot | eade4b2593 | |
mindspore-ci-bot | b946be5d30 | |
mindspore-ci-bot | c3c0c8b53e | |
mindspore-ci-bot | 7971282190 | |
mindspore-ci-bot | ed86ca22ab | |
zhaodezan | 60b7e2afbf | |
mindspore-ci-bot | 0ce5f4ca4d | |
mindspore-ci-bot | 74545e81e0 | |
w00535372 | fd009d815e | |
mindspore-ci-bot | 89a8b0f433 | |
mindspore-ci-bot | 9340eb62af | |
hebotao | 08cf6e7517 | |
lz | fa289f0017 | |
mindspore-ci-bot | d699de4e7f | |
xuanyue | b9cc44ef16 | |
liubuyu | d142b0cbf4 | |
mindspore-ci-bot | ade2d4a945 | |
mindspore-ci-bot | 4265b9c3a4 | |
mindspore-ci-bot | de03813269 | |
chendongsheng | 6c38cb0952 | |
mindspore-ci-bot | 4a80855c0a | |
lihongkang | a119423d96 | |
jiangzhenguang | 7266d3da83 | |
zhoufeng | e910aac72b | |
shibeiji | b405d1e28f | |
mindspore-ci-bot | f7231b2c5f | |
mindspore-ci-bot | e63ec83102 | |
YangLuo | c1e12e3e93 | |
wanyiming | eaf5a19724 | |
wangyanling | 92144103ba | |
wangnan39@huawei.com | 23c85bffa5 | |
zhanke | bffdc9b6a8 | |
ms_yan | 25960c72b7 | |
mindspore-ci-bot | d2f8464367 | |
CaoJian | 8252244562 | |
xutianchun | 5d3361fb55 | |
yanglf1121 | 48d26645e4 | |
mindspore-ci-bot | 8ceaa36efe | |
liuhe | 7175d3deef | |
zhaoting | e147feb3fc | |
lilei | 23ad659d58 | |
mindspore-ci-bot | 64f29e1c1c | |
mindspore-ci-bot | 61a34e76bf | |
mindspore-ci-bot | 5dcc949acf | |
mindspore-ci-bot | 7542acd793 | |
mindspore-ci-bot | 4aa1ec3618 | |
lichenever | 79e55be238 | |
wilfChen | 386ccb83e0 | |
mindspore-ci-bot | a118eca5e4 | |
huangbingjian | 6f0030f53b | |
Jiaqi | ff619e0356 | |
He Wei | e112f0fffa | |
Xiao Tianci | e135d07c61 | |
mindspore-ci-bot | 684398b93a | |
mindspore-ci-bot | cab364b876 | |
mindspore-ci-bot | 4228ea8982 | |
yuzhenhua | f250cecb19 | |
mindspore-ci-bot | f02d386094 | |
hwjiaorui | 41ed35e6b4 | |
mindspore-ci-bot | 776c44954d | |
mindspore-ci-bot | 41c8050693 | |
mindspore-ci-bot | b052b5f3cf | |
mindspore-ci-bot | 851f5c4834 | |
zhangzhaoju | f17bbcc49f | |
RobinGrosman | ca79a8fca3 | |
John Tzanakakis | 8c6ced51df | |
hwjiaorui | 0895bb1a48 | |
mindspore-ci-bot | 2edf6c54e7 | |
mindspore-ci-bot | 28cdf8a983 | |
yuzhenhua | e07fad0543 | |
mindspore-ci-bot | b0abf6b530 | |
mindspore-ci-bot | 83f55d974e | |
mindspore-ci-bot | 94a475abf4 | |
mindspore-ci-bot | 0f8330ec89 | |
mindspore-ci-bot | 1766f12888 | |
mindspore-ci-bot | 74668f0226 | |
mindspore-ci-bot | bdbbb82a1a | |
mindspore-ci-bot | 4f54cccccb | |
zhaoting | 229f6dde29 | |
mindspore-ci-bot | 2af7c51019 | |
zhaoting | b90e7ea196 | |
mindspore-ci-bot | a857993972 | |
TFBunny | 5453fc8744 | |
mindspore-ci-bot | dfd1198572 | |
Eric | 000956a5e9 | |
Margaret_wangrui | 06bbb24487 | |
Islam Amin | 2b57cc97a7 | |
Lixia Chen | d2aa1d7d89 | |
mindspore-ci-bot | a57c4e693d | |
tom__chen | fe7ffa1092 | |
mindspore-ci-bot | f7ff861fc3 | |
mindspore-ci-bot | a9fe5b5ca1 | |
mindspore-ci-bot | 65b13b5d67 | |
yangwei | f235c2218a | |
wangshuide2020 | c7148c3ab4 | |
liubuyu | 136fdc7144 | |
zhaoting | 519de5c32b | |
jonyguo | ad336fa544 | |
Xiaoda Zhang | 6016f2eab3 | |
mindspore-ci-bot | 5b5891bb14 | |
mindspore-ci-bot | a64c0eeb17 | |
mindspore-ci-bot | 7f3d76dbfa | |
mindspore-ci-bot | 9ad84db50e | |
mindspore-ci-bot | b9dc51e823 | |
mindspore-ci-bot | 010f40d2b6 | |
zhujingxuan | 1967825443 | |
mindspore-ci-bot | 27c9c2b7f5 | |
cy | 1cc75f7065 | |
mindspore-ci-bot | 16f44980c5 | |
wangshuide2020 | 67b1d265ca | |
q00596439 | dc61fb6ad2 | |
mindspore-ci-bot | 110996a423 | |
zhuyuxiao | 73aeca65e7 | |
mindspore-ci-bot | 1448560c5f | |
mindspore-ci-bot | 0235c08957 | |
huangbingjian | 021c50cbc5 | |
lixian | 5dd8deb3ac | |
jiangzhenguang | c796402735 | |
yanglf1121 | 91afcc6cb4 | |
cjh9368 | 2ebc368f21 | |
mindspore-ci-bot | ef27f1f663 | |
yujianfeng | 5a1b21b6c8 | |
mindspore-ci-bot | 923f7160b0 | |
l00591931 | 11e6bc270d | |
fangzehua | fd312a4682 | |
lichenever | 2abac948e0 | |
caojiewen | bdc3110466 | |
mindspore-ci-bot | cc996f9bdd | |
lvchangquan | 2ac61cf2b4 | |
huangbingjian | ca7f5dc58e | |
zengxianglong | 4869f19578 | |
xiefangqi | 07c26be796 | |
mindspore-ci-bot | 4dc159d3cd | |
mindspore-ci-bot | 5b08db8227 | |
zhoufeng | 6914f33705 | |
mindspore-ci-bot | 7bcd37ff8e | |
yeyunpeng2020 | af98da1731 | |
mindspore-ci-bot | 1c86e9d851 | |
w00535372 | fb3b8496a0 | |
mindspore-ci-bot | 86f0fb61d4 | |
zhujingxuan | a3f0a99613 | |
shenwei41 | 93d594a139 | |
jjfeing | b6b86904bd | |
hesham | 01d318050f | |
lixiaohui | dfd3320ffb | |
xulei2020 | 605f47c130 | |
mohammad | 139d83fc9a | |
changzherui | 97138db981 | |
mindspore-ci-bot | 5ab1171bb6 | |
mindspore-ci-bot | 902d4d35ca | |
mindspore-ci-bot | 664fe91075 | |
yao_yf | 96861fc3c9 | |
lixiaohui | 2cf08b5dc5 | |
Xiao Tianci | e1ed4b592e | |
mindspore-ci-bot | 2afa3a6eb9 | |
mindspore-ci-bot | 68093fa5d1 | |
mindspore-ci-bot | 9ec9164064 | |
mindspore-ci-bot | dd997faa30 | |
mindspore-ci-bot | 12bc103ec6 | |
mindspore-ci-bot | c1bb1b02ec | |
lichenever | 8ce979ec6c | |
mindspore-ci-bot | 16002ea438 | |
mindspore-ci-bot | 52c7385a55 | |
zhaoting | ae0ac6a837 | |
dinglinhe | f1780a03a4 | |
mindspore-ci-bot | de3a9a80cc | |
mindspore-ci-bot | b21637ed23 | |
buxue | 8f112f147b | |
mindspore-ci-bot | 8eb795fa48 | |
mindspore-ci-bot | 740b2abf3e | |
luopengting | 09c432aceb | |
ms_yan | efa69c7112 | |
mindspore-ci-bot | 3a7e803024 | |
liuhe | 04b3866375 | |
chendongsheng | 2a9f104ac0 | |
mindspore-ci-bot | f1636ef504 | |
xiefangqi | f63e359ac8 | |
wangyanling | 04aee9d007 | |
wuxuejian | af4b9c1234 | |
lvchangquan | 3637f61dab | |
huangbingjian | c5175129b1 | |
mindspore-ci-bot | 45629aaeb3 | |
mindspore-ci-bot | b6335cc743 | |
mindspore-ci-bot | 3fe401feb5 | |
YangLuo | 6f2b4fa665 | |
jjfeing | 7184a1d656 | |
mindspore-ci-bot | 14cb3d692b | |
mindspore-ci-bot | 384fafd3f2 | |
mindspore-ci-bot | 4178ac51ba | |
Zirui Wu | f275563333 | |
w00535372 | 57c4b74e94 | |
mindspore-ci-bot | 10514263eb | |
l00591931 | 4c0369aaeb | |
mindspore-ci-bot | b589483cd1 | |
mindspore-ci-bot | 424b068af7 | |
chujinjin | 450d94733c | |
chujinjin | 31ee29e7d5 | |
dinglinhe | ba0b21f884 | |
mindspore-ci-bot | f2f2af9105 | |
zhujingxuan | 94346c241a | |
mindspore-ci-bot | 72763713be | |
mindspore-ci-bot | 48854db845 | |
zhaozhenlong | b2e795f706 | |
Margaret_wangrui | 3824c91283 | |
mindspore-ci-bot | 98082e44f9 | |
mindspore-ci-bot | c3eea27fab | |
liuhe | 6b4f421287 | |
mindspore-ci-bot | 8dacfb6988 | |
liuyu | ab3b6cf7e9 | |
mindspore-ci-bot | a0e21bfe98 | |
陈劢 | fc44aa1d20 | |
yangchun | 278e8dd575 | |
mindspore-ci-bot | e09ad7c1f6 | |
范吉斌 | 519c1d85dc | |
mindspore-ci-bot | a5234a9ef0 | |
yao_yf | c9f3bd7cea | |
xutianchun | d365f0a296 | |
luoyang | 93671501bf | |
z00512249 | e1b41e6b27 | |
mindspore-ci-bot | 78092ada25 | |
mindspore-ci-bot | 9777199f75 | |
mindspore-ci-bot | 6663c36f7b | |
mindspore-ci-bot | 59ab43c851 | |
mindspore-ci-bot | e7055e37fe | |
mindspore-ci-bot | fa39610bf4 | |
mindspore-ci-bot | 0f8bd6a0ee | |
caifubi | 2c833bebbe | |
yanghaoran | ee05460a76 | |
zhujingxuan | 18a86d6ce1 | |
zhangxinfeng3 | c1935bf187 | |
mindspore-ci-bot | 778b72dba0 | |
xulei2020 | e8fcbe7a1c | |
mindspore-ci-bot | 18a3d3beca | |
mindspore-ci-bot | 0ebf0391cd | |
hukang hwx963878 | 2447000ca3 | |
mindspore-ci-bot | f2fd3c5e85 | |
yangruoqi713 | 91523fa7b7 | |
zhuyuxiao | 7fae5734be | |
mindspore-ci-bot | 96ebe7edf1 | |
ms_yan | 35ab6076fc | |
mindspore-ci-bot | 654b41fa13 | |
wang_shaocong | 14bbc01a7d | |
mindspore-ci-bot | d793e8fa18 | |
mindspore-ci-bot | 9ad40b0923 | |
mindspore-ci-bot | 5723726ec7 | |
liyong | a83c9bdd6a | |
mindspore-ci-bot | b7c3e2baaf | |
yanghaoran | b8e3322d69 | |
mindspore-ci-bot | 7e21427a2a | |
mindspore-ci-bot | 69ce0c1c0a | |
mindspore-ci-bot | 1fb9400357 | |
mindspore-ci-bot | 96d56f2d64 | |
mindspore-ci-bot | 0686767e7d | |
ms_yan | 3073757668 | |
mindspore-ci-bot | 934669f947 | |
mindspore-ci-bot | 2c6fdea1f9 | |
mindspore-ci-bot | 2e7a37c638 | |
yao_yf | 2934081ccc | |
mindspore-ci-bot | 6a76fa36b4 | |
yangwei | 04655215dc | |
mindspore-ci-bot | e13e2f1333 | |
yangjie159 | 4e8d26f823 | |
yanghaoran | f2b7fcf73a | |
l00591931 | 05923d01ba | |
liuhe | 7b72995f99 | |
mindspore-ci-bot | 33524f2c51 | |
luopengting | 56bc6b4eb6 | |
mindspore-ci-bot | 434d2408e2 | |
mindspore-ci-bot | 49d522e8a7 | |
zhouneng | 9b8c2970bf | |
mindspore-ci-bot | 8129600138 | |
huangbingjian | 3ac9e94a9d | |
ling | 72e665c447 | |
yepei6 | 548225286d | |
hwjiaorui | ac69574b7c | |
mindspore-ci-bot | 408b23380e | |
mindspore-ci-bot | a2edccd309 | |
mindspore-ci-bot | 1991387896 | |
mindspore-ci-bot | cfe336e54e | |
mindspore-ci-bot | 168f5ed819 | |
mindspore-ci-bot | d26cf2c2be | |
xiefangqi | 9a670476c7 | |
buxue | 37132676c4 | |
mindspore-ci-bot | 6cb94b0009 | |
mindspore-ci-bot | c32d4bb33f | |
mindspore-ci-bot | f26034cce0 | |
shenwei41 | effe323051 | |
liubuyu | 8065e49b3b | |
mindspore-ci-bot | 27c947265e | |
liuxiao93 | f9e27c0219 | |
mamba_ni | dc9ab64b71 | |
xiefangqi | 0450b1c59f | |
shenwei41 | efcb9d951f | |
liuyang_655 | cee23b3c88 | |
changzherui | 4ce36663a9 | |
mindspore-ci-bot | 38b7c94a98 | |
tanghuikang | 3e9ca6e1ca | |
mindspore-ci-bot | 0190268af3 | |
mindspore-ci-bot | 9580016d46 | |
gzhcv | 303768dc42 | |
mindspore-ci-bot | 1c76cb759e | |
mindspore-ci-bot | 1061b721cb | |
Ziyan | a88df2401f | |
mindspore-ci-bot | 9a016047b3 | |
laiyongqiang | 88f8c3effb | |
mindspore-ci-bot | f9801a79e5 | |
yuchaojie | 913e586d65 | |
liuhe | bd0fde5dc8 | |
hwjiaorui | 67c58e6e29 | |
mindspore-ci-bot | eabf675391 | |
xulei2020 | 97b4443268 | |
mindspore-ci-bot | 4d69af0641 | |
mindspore-ci-bot | 8ba364e042 | |
mindspore-ci-bot | 4f1f35a96e | |
caifubi | 272600f6a2 | |
sunsuodong | e8b8f61cd0 | |
mindspore-ci-bot | b07ab1d5a1 | |
mindspore-ci-bot | fd8fe51a48 | |
z00512249 | ef1adf07f2 | |
Jiaqi | 19edbac71f | |
mindspore-ci-bot | 3dd79a6b52 | |
mindspore-ci-bot | ed7af5fb82 | |
huangmengxi | 084f13b59e | |
mindspore-ci-bot | 38600625c0 | |
mindspore-ci-bot | c733af758b | |
mindspore-ci-bot | 42f6d71560 | |
mindspore-ci-bot | 9262f7f5ac | |
yanghaoran | 45f593311d | |
mindspore-ci-bot | a5f7348a73 | |
mindspore-ci-bot | 256555d8a5 | |
xutianchun | a2deb3de87 | |
jonyguo | 7a1eb7183e | |
zhaozhenlong | f238602d00 | |
zhujingxuan | e73a448e3c | |
mindspore-ci-bot | 3ab3fa5341 | |
buxue | 19847d7591 | |
yuzhenhua | c9028157ed | |
mamba_ni | 3dff9e5050 | |
mindspore-ci-bot | 25cef5716e | |
w00535372 | 54565b49e9 | |
mindspore-ci-bot | 9d7130a784 | |
yepei6 | d6a5d42c61 | |
LianLiguang | 9af15415d2 | |
mindspore-ci-bot | 2f3c0fb9b8 | |
liuhe | b8590086ca | |
mindspore-ci-bot | 2872602f4b | |
liubuyu | 4540ca0087 | |
xiefangqi | 9bea918b5f | |
mindspore-ci-bot | b69e30d957 | |
alouhahaha | d8b38157da | |
mindspore-ci-bot | 37a451a65f | |
mindspore-ci-bot | ec083ca5ed | |
mindspore-ci-bot | 34f647dc32 | |
mindspore-ci-bot | 5b211b9546 | |
jianghui58 | 1f97d5a2d2 | |
mindspore-ci-bot | afde4e3bf9 | |
mindspore-ci-bot | 52a5821149 | |
mindspore-ci-bot | 0e85b4a9d8 | |
lilei | 197bc44737 | |
mindspore-ci-bot | 7ba3325da1 | |
liuxiao93 | 9a4be0dd31 | |
mindspore-ci-bot | e5f2c98dcd | |
changzherui | 5b3ffee257 | |
zhoufeng | a3a87d0fa8 | |
zhoufeng | b868ea4ab6 | |
lvchangquan | a2a08732ed | |
mindspore-ci-bot | 14d2ebd0c9 | |
mindspore-ci-bot | d6a2f7ea69 | |
wangdongxu | 306761da3b | |
mindspore-ci-bot | 2ad6a2d9cf | |
mindspore-ci-bot | 188a5c07fd | |
mindspore-ci-bot | 1a07d46a16 | |
mindspore-ci-bot | 46ed0928b1 | |
yelihua | 92f9ddea05 | |
yanglf1121 | 92bcb20591 | |
baihuawei | 3716a0da1a | |
mindspore-ci-bot | cff34f34b5 | |
lixian | 16c506c51f | |
ling | 0071855468 | |
yangruoqi713 | 10b028e94d | |
louei5 | e90c59a7c5 | |
mindspore-ci-bot | b1ce20ebf8 | |
linqingke | 1914bdb640 | |
mindspore-ci-bot | 25f8f820c3 | |
VectorSL | 8407ccf455 | |
yepei6 | 9e68a1ae8b | |
lixian | 92d0070fd2 | |
yeyunpeng2020 | b01077152e | |
mindspore-ci-bot | 5f01223ca4 | |
liuhe | 7862128ace | |
mindspore-ci-bot | 7c7b866af3 | |
xutianchun | bdd772af16 | |
Eric | ba4d679149 | |
yefeng | c432bc7207 | |
mindspore-ci-bot | 6a5dc3f568 | |
RobinGrosman | be56ef6efc | |
caifubi | 29d120a745 | |
yangwei | 9d45c4c711 | |
mindspore-ci-bot | 1b9a5563e5 | |
mindspore-ci-bot | 74a1f689e5 | |
mindspore-ci-bot | ed5891b382 | |
mindspore-ci-bot | 94beb55913 | |
shenwei41 | 1c68c2b61f | |
mindspore-ci-bot | f9632a1e71 | |
mindspore-ci-bot | 13c238a9ae | |
lixian | 743ff9b5c7 | |
mindspore-ci-bot | 5bb9e8ad9f | |
mindspore-ci-bot | e000b03046 | |
mindspore-ci-bot | 59b81166c3 | |
mindspore-ci-bot | d775166f57 | |
luopengting | a50d4f22df | |
mindspore-ci-bot | 98eb7cf0bd | |
mindspore-ci-bot | 37697afa36 | |
liuhe | 3ea4d5508f | |
yeyunpeng2020 | ddd1a89f21 | |
mindspore-ci-bot | 2457fe476f | |
q00596439 | a65f2072c3 | |
huangmengxi | 85f39a0b50 | |
mindspore-ci-bot | bc76d35a42 | |
mindspore-ci-bot | 4d54c21ac7 | |
wangdongxu | 8b48b6bf0c | |
mindspore-ci-bot | 7a56ee2149 | |
liuxiao93 | e21eac9659 | |
buxue | 016a8a12b8 | |
mindspore-ci-bot | 35b66bc0a8 | |
yeyunpeng2020 | 40602be929 | |
yepei6 | 16ad587915 | |
caifubi | 882d848272 | |
dingpeifei | 0f352fc6a7 | |
ms_yan | feb2e6f59d | |
mindspore-ci-bot | 48ed9e9d82 | |
pkuliuliu | 564a30c149 | |
mindspore-ci-bot | 5645b7b93b | |
lixian | eb73921491 | |
mindspore-ci-bot | 689ee712e9 | |
luopengting | dd7858ef2c | |
Jiaqi | c371834840 | |
mindspore-ci-bot | 322776ef5e | |
luopengting | be374a79a6 | |
yeyunpeng2020 | e303037429 | |
mindspore-ci-bot | e4b70e5a55 | |
yangruoqi713 | ed1200683b | |
mindspore-ci-bot | bbf92f0a48 | |
mindspore-ci-bot | c0314f9332 | |
mindspore-ci-bot | 55db218a79 | |
mindspore-ci-bot | db19354ce1 | |
mindspore-ci-bot | 0609846a81 | |
mindspore-ci-bot | 93641259bb | |
jiangzhenguang | 7fd5508f59 | |
luopengting | 34d5d32c0e | |
zhangxinfeng3 | 4da6a87d71 | |
lilongfei | ffe99439c9 | |
mindspore-ci-bot | 72c3317c4f | |
mindspore-ci-bot | 5472e704bf | |
luopengting | 0d1d8e68f2 | |
z00512249 | 8fa4974390 | |
yepei6 | 101ddcc7b1 | |
mindspore-ci-bot | 149ca9721b | |
mindspore-ci-bot | 73fd1313d5 | |
wangnan39@huawei.com | f6821513f4 | |
mindspore-ci-bot | 541809f173 | |
mindspore-ci-bot | 74f258b3bf | |
mindspore-ci-bot | 2515d38fef | |
mindspore-ci-bot | b7f53a573b | |
zhanghuiyao | fe3ddb4412 | |
yanzhenxiang2020 | 955b2c5ebf | |
mindspore-ci-bot | 5c48f331c4 | |
mindspore-ci-bot | b43bbcc1e1 | |
mindspore-ci-bot | ee18d1a18b | |
mindspore-ci-bot | 5cab278038 | |
mindspore-ci-bot | 31780edd45 | |
lujiale | f13224ba88 | |
yuyiyang_3418 | ce1ddaa6a9 | |
shenwei41 | 288da60b0f | |
mindspore-ci-bot | b83a09f3db | |
lilongfei | 7b3cbdb609 | |
lilongfei | 39fbf69674 | |
q00596439 | 61e96f2167 | |
liangzelang | 8bfa130d5b | |
zlq2020 | 40deecd153 | |
gaoyong10 | 930a5a19c6 | |
donghufeng | 7465a98cd6 | |
yang_chun | fb1cfc40e3 | |
mindspore-ci-bot | 720c571975 | |
lvliang | 6b7e591fd8 | |
liuhe | b21d57844b | |
mindspore-ci-bot | 331de218f1 | |
mindspore-ci-bot | 3509ddf617 | |
mindspore-ci-bot | 83baa8f6a9 | |
TFBunny | ddf103ed16 | |
mindspore-ci-bot | 9a62a3c0b4 | |
mindspore-ci-bot | 496ba58ba3 | |
zhoufeng | 49d48f38ae | |
mindspore-ci-bot | 1275124f60 | |
mindspore-ci-bot | b36a926e66 | |
YangLuo | 5b8fae497e | |
mindspore-ci-bot | 9fde327048 | |
gengdongjie | f96ae7eead | |
q00596439 | 46041d7736 | |
mindspore-ci-bot | d44b2ec016 | |
mindspore-ci-bot | 9eaf11e525 | |
mindspore-ci-bot | cd1632dc5f | |
huangxinjing | ac3037530d | |
mindspore-ci-bot | 176e6f0b75 | |
baihuawei | 8430ca147c | |
mindspore-ci-bot | 1b6c817b7f | |
mindspore-ci-bot | 9f6f5e9d0d | |
mindspore-ci-bot | 9a4cec5389 | |
mindspore-ci-bot | 7a4c8ba61b | |
mindspore-ci-bot | 4f8c60670c | |
mindspore-ci-bot | 071884fc3f | |
mindspore-ci-bot | c008eb0aa1 | |
mindspore-ci-bot | dd33bccc33 | |
wangrao | 76a70e63bc | |
mindspore-ci-bot | adccbe4499 | |
mindspore-ci-bot | 2c72cc1ecc | |
mindspore-ci-bot | 96bbb6a39c | |
liuxiao93 | 5c480609da | |
changzherui | fca9cd3a10 | |
zhujingxuan | 406cbbb4e5 | |
zhaozhenlong | 66ab171ebc | |
mindspore-ci-bot | 48e2ede91d | |
liuhe | 179fc3487c | |
mindspore-ci-bot | 2a79ed5bb3 | |
mindspore-ci-bot | 245fc4252a | |
mindspore-ci-bot | 87b3770c24 | |
louei5 | 8f8c7d2e0e | |
z00512249 | cc349a4dc2 | |
mindspore-ci-bot | e8167083e2 | |
mindspore-ci-bot | 1f729451a3 | |
mindspore-ci-bot | e46ff26e75 | |
linqingke | be07d46a87 | |
mindspore-ci-bot | 2af439efe8 | |
cjh9368 | 738598af28 | |
mindspore-ci-bot | 568f3ceefd | |
mindspore-ci-bot | 7e24d2445f | |
mindspore-ci-bot | 06ae82c07f | |
mindspore-ci-bot | 981c26e63f | |
yanzhenxiang2020 | 98a4d510fd | |
w00535372 | 1ae00a01da | |
mindspore-ci-bot | 0f833b595a | |
mindspore-ci-bot | da43e9f5c8 | |
mindspore-ci-bot | 20a7b5cfea | |
mindspore-ci-bot | 722fa09edb | |
mindspore-ci-bot | f8db0dd246 | |
mindspore-ci-bot | 8dde88f97f | |
yepei6 | 9be9435c49 | |
mindspore-ci-bot | 49568d61d2 | |
mindspore-ci-bot | 32f7887198 | |
mindspore-ci-bot | b702f806f1 | |
mindspore-ci-bot | b16465360f | |
zhaozhenlong | a0c1cfab22 | |
mindspore-ci-bot | beb855a4a3 | |
caifubi | e3932a3de7 | |
zhanyuan | 48da8d47a4 | |
l00591931 | 88e5658587 | |
mindspore-ci-bot | 5a024ecdd8 | |
wuxuejian | 8f338c3f52 | |
mindspore-ci-bot | 5fa9840a59 | |
lilei | 91bed433b0 | |
lvchangquan | 1531271623 | |
caozhou | 16d2289979 | |
lixian | 0f2acd10b4 | |
mindspore-ci-bot | 8edc24700b | |
zhujingxuan | cc858f6f72 | |
yangruoqi713 | 49e5a32606 | |
dingpeifei | 8602e97923 | |
mindspore-ci-bot | 2c083bc7eb | |
z00512249 | b243633551 | |
huangmengxi | 6c6c9ebd6c | |
yeyunpeng2020 | a7970b8320 | |
l00591931 | 50eea9fee8 | |
mindspore-ci-bot | e5a8d6abdd | |
lzk | 889b1116e7 | |
mindspore-ci-bot | 62a6c8e1f5 | |
mindspore-ci-bot | e16630c844 | |
mindspore-ci-bot | da758e6d6c | |
mindspore-ci-bot | ea306ab000 | |
mindspore-ci-bot | 6a9e55a382 | |
mindspore-ci-bot | 5865639c7a | |
mindspore-ci-bot | f9d565deba | |
mindspore-ci-bot | bf12c05f8f | |
mindspore-ci-bot | dedc229d2f | |
mindspore-ci-bot | bbc7bd763a | |
mindspore-ci-bot | c72c1209d9 | |
mindspore-ci-bot | 63dcac269f | |
mindspore-ci-bot | d6273a3c86 | |
mindspore-ci-bot | 610297ba40 | |
Zhang Qinghua | 5178ed7cb8 | |
huangbingjian | c8872a582d | |
mindspore-ci-bot | e2c5b331bd | |
xutianchun | c829fc5544 | |
mindspore-ci-bot | 3271ee2c52 | |
xsmq | 1e3a20c5c9 | |
tanghuikang | bdf48e2341 | |
xiefangqi | 9da6205827 | |
mindspore-ci-bot | c07feca6a9 | |
mindspore-ci-bot | 5f3f827117 | |
zengxianglong | 4ba2a2f913 | |
mindspore-ci-bot | 1e486b6f34 | |
caifubi | b073f277f1 | |
yefeng | f9159f8fc0 | |
wang_shaocong | 1224733f2d | |
limingqi107 | 92ba89b88e | |
wangshuide2020 | 847c850744 | |
liuhe | cbd357549f | |
kswang | 8e1954ba55 | |
mindspore-ci-bot | 1e19304c91 | |
mindspore-ci-bot | fe52fb4668 | |
liangzelang | 02c1954d45 | |
lihongkang | ed6e49fc82 | |
yangjie159 | c2e2c8d93e | |
mindspore-ci-bot | 6f80fa683b | |
mwang | 7366d5cf1b | |
mindspore-ci-bot | 67b68c1bd2 | |
mindspore-ci-bot | 35e8165fcc | |
z00512249 | 2a28b4ee41 | |
mindspore-ci-bot | c78e0f32b3 | |
mindspore-ci-bot | d7bd79244a | |
yuyiyang_3418 | ba7eddb947 | |
mindspore-ci-bot | 447aed7b13 | |
yanzhenxiang2020 | 245575693b | |
mindspore-ci-bot | 0084f0aa43 | |
zhangyi | ff0cb148e4 | |
mindspore-ci-bot | ececef5ed0 | |
mindspore-ci-bot | ffbfcd08a9 | |
mindspore-ci-bot | 407b25887b | |
mindspore-ci-bot | a5a3a9b508 | |
mindspore-ci-bot | 9cb27e5617 | |
mindspore-ci-bot | 86d3a8eea0 | |
mindspore-ci-bot | f0bb74c432 | |
caojiewen | 17b21c7e05 | |
panfei | 89dcc5cec0 | |
GAO_HYP_XYJ | 6a63b78bf2 | |
zhuyuxiao | fd93ca0dcd | |
zhoufeng | 6f160674f3 | |
lixian | 7a84f23ae5 | |
mindspore-ci-bot | 0b1686e0b3 | |
mindspore-ci-bot | 437ef101db | |
yangjie159 | 60eb1e9c88 | |
mindspore-ci-bot | 24a0bfb6a3 | |
zengxianglong | 33583ce794 | |
linqingke | 7cc06b483e | |
xiefangqi | e1651ac1db | |
zhanke | 6504d04f1c | |
wsq3 | 7524bb97ef | |
zhaoting | 83a2a87cf3 | |
zhanghuiyao | 999ccc1d4b | |
dingpeifei | eb9f9ce38b | |
mindspore-ci-bot | 05a99e996d | |
mindspore-ci-bot | 72eedef82e | |
yao_yf | 6523c69b37 | |
lzk | 442cd62d3d | |
mindspore-ci-bot | 682a2f7209 | |
Peilin Wang | 8350a31d4c | |
TFBunny | 576334179d | |
shenwei41 | a32a483e97 | |
yepei6 | 995d9a027e | |
yuchaojie | 05214d2d24 | |
mindspore-ci-bot | e3b6f6f0a1 | |
liuxiao93 | a9b0d3f731 | |
z00512249 | 900f01af0b | |
huangbingjian | 5a73a26fee | |
mindspore-ci-bot | 2a4e4d2a06 | |
mindspore-ci-bot | 0c44007e55 | |
sunsuodong | dcf0a1ecb1 | |
mindspore-ci-bot | 89aaa7ed89 | |
chengxianbin | 2039d6c9d4 | |
mindspore-ci-bot | 7dffe5a693 | |
yefeng | e0194cf056 | |
zhujingxuan | f874a35aa9 | |
mindspore-ci-bot | f30a0281a5 | |
mindspore-ci-bot | ca5b253a56 | |
mindspore-ci-bot | 012af6f023 | |
yuzhenhua | bb1ef1b547 | |
unknown | 6ab995b9c6 | |
z00512249 | 5d9124315a | |
simson | 4b3bee2531 | |
yangwei | c8f829fac9 | |
He Wei | eaa45227f0 | |
mindspore-ci-bot | 12db983888 | |
chenhaozhe | acdc196869 | |
mindspore-ci-bot | 24189d7518 | |
mindspore-ci-bot | 433e0f6900 | |
mindspore-ci-bot | 11ae248664 | |
mindspore-ci-bot | 83df79c2ce | |
chendongsheng | 9e7c256598 | |
hanhuifeng2020 | 5ae6a9459f | |
q00596439 | ec30d857a4 | |
liyong | 6aecbf6ee4 | |
liuhe | b12de37856 | |
fuzhiye | 41ce6dc30d | |
mindspore-ci-bot | abc929c247 | |
yangjie159 | 2682a72944 | |
mindspore-ci-bot | 162bb4e293 | |
mindspore-ci-bot | bbfffc0c2c | |
caifubi | 9cf7773c0c | |
changzherui | cef469b6e2 | |
mindspore-ci-bot | a708ded285 | |
wangnan39@huawei.com | 8f8ec5c847 | |
mindspore-ci-bot | 3d74caba98 | |
z00512249 | 60bd9248f2 | |
mindspore-ci-bot | cf7ccf2903 | |
yuchaojie | 102d4b4024 | |
mindspore-ci-bot | 84a4b119bc | |
fuzhiye | 0b1e7aebcd | |
mindspore-ci-bot | 6f81d157aa | |
mindspore-ci-bot | 16bcb2a46d | |
mindspore-ci-bot | efdf9638e5 | |
mindspore-ci-bot | 1416db06e5 | |
zhujingxuan | f32235e4d4 | |
mindspore-ci-bot | 49a60f97cf | |
mindspore-ci-bot | 7b3cf341af | |
mindspore-ci-bot | 206c0d5023 | |
wang_shaocong | 2dbfe89f9d | |
lixian | 12e82a5dc7 | |
yangjie159 | 4b801e18b3 | |
z00512249 | 6f4fb69f82 | |
chendongsheng | a2f3ac5af3 | |
yepei6 | 48d50d97e6 | |
mindspore-ci-bot | b7479a7bf7 | |
mindspore-ci-bot | 94315bee8e | |
wangshuide2020 | d307320d1a | |
mindspore-ci-bot | 9f6d04c17f | |
luopengting | 21411e2ffd | |
lzk | 284c84a7da | |
yangruoqi713 | 1c20ddbc7c | |
mindspore-ci-bot | e4b6377149 | |
mindspore-ci-bot | 4c02d67cbf | |
mindspore-ci-bot | 4cba978b05 | |
mindspore-ci-bot | 16085022e3 | |
mwang | 34156d24d5 | |
mindspore-ci-bot | 28f953cfbd | |
mindspore-ci-bot | cdaf1132cb | |
yangruoqi713 | c557b8f167 | |
mindspore-ci-bot | 0763dfa3bb | |
mindspore-ci-bot | b97656d3e8 | |
mindspore-ci-bot | afc798744d | |
zengxianglong | 5acc8d1395 | |
hangangqiang | 7132ea5e12 | |
lilei | fa17b9b70d | |
zhangyi | 11283ad812 | |
yangjie159 | abd3033e9b | |
Ziyan | d381fb50a4 | |
liubuyu | 30cd398158 | |
xuanyue | d9b2fe4266 | |
Zhang Qinghua | ad11fd9ba5 | |
mindspore-ci-bot | 340583367f | |
mindspore-ci-bot | 79eeae8f44 | |
mindspore-ci-bot | defcc0a074 | |
mindspore-ci-bot | 3d403455e7 | |
mindspore-ci-bot | b12b2248e8 | |
yanglf1121 | f41687bde9 | |
mindspore-ci-bot | d6fb43e148 | |
mindspore-ci-bot | 9e00f9facb | |
looop5 | ceb581dfa7 | |
sunsuodong | 5cd3f01eb2 | |
mindspore-ci-bot | 12c77a231f | |
mindspore-ci-bot | a4570a1dfb | |
sunsuodong | 0b9a7d7967 | |
buxue | de343a0e00 | |
mindspore-ci-bot | df4a3cdf22 | |
mindspore-ci-bot | 3b9843f57e | |
yujianfeng | 3b521a1e18 | |
xutianchun | 7d181e9c99 | |
mindspore-ci-bot | 3fbeb9d701 | |
mindspore-ci-bot | 2666ec4df5 | |
mindspore-ci-bot | d5eac8a91b | |
mindspore-ci-bot | 9e84b421dc | |
lzk | c749916ac6 | |
zhoufeng | 734cc674a5 | |
lzk | 92f16b35c6 | |
zhengjun10 | 76dc0e4ae8 | |
ms_yan | 6470ef8447 | |
CaoJian | 21a2c7bcbe | |
lixian | 9722a014c0 | |
zhujingxuan | 53b68c2444 | |
mindspore-ci-bot | b0f5781477 | |
mindspore-ci-bot | 674fa9b68c | |
mindspore-ci-bot | a795835fd8 | |
mindspore-ci-bot | 06ba400604 | |
shenwei41 | 8b5871ef44 | |
mindspore-ci-bot | 5fb50ca6b7 | |
mindspore-ci-bot | e557d81c4f | |
mindspore-ci-bot | 0a5185c6a5 | |
zhaoting | 8754aaeb74 | |
mindspore-ci-bot | dd2c2458a2 | |
mindspore-ci-bot | 64d9b5169a | |
xutianchun | db6b45ee4d | |
z00512249 | f3be546e61 | |
chenyijie6 | 99ebe71b39 | |
louei5 | fd51088da8 | |
mindspore-ci-bot | d165f0e6f3 | |
YangLuo | e73e20928b | |
Xiao Tianci | 16ee8891ab | |
xiefangqi | 53a7bc6ec4 | |
xsmq | f98109aa5a | |
liubuyu | f303f5ff6e | |
mindspore-ci-bot | 0451a800bc | |
mindspore-ci-bot | b5396bc1cd | |
mindspore-ci-bot | 6b8bef2c8a | |
mindspore-ci-bot | 1ecb0fde8f | |
mindspore-ci-bot | 7de2d7b331 | |
mindspore-ci-bot | 3539952b66 | |
CaoJian | 27e933ec86 | |
zhangyi | 69ee1a077f | |
mindspore-ci-bot | 83d4c8dbe3 | |
mindspore-ci-bot | b75418bfb5 | |
mindspore-ci-bot | 5b95409022 | |
zhengqihao | f3a1a69ce5 | |
mindspore-ci-bot | 2fadad0875 | |
mindspore-ci-bot | 38b5ff71ad | |
shenwei41 | f83c9f19ba | |
mindspore-ci-bot | 8e8f3043f9 | |
ttudu | dad3172abb | |
mindspore-ci-bot | 1d505ebad3 | |
mindspore-ci-bot | 72cc142a0b | |
mindspore-ci-bot | 94841e4889 | |
mindspore-ci-bot | 6c096bff31 | |
mindspore-ci-bot | caaff9ab03 | |
mindspore-ci-bot | 9a4ccaf913 | |
wsq3 | 8bdea4ab54 | |
mindspore-ci-bot | f7ff8e81cd | |
mindspore-ci-bot | b605fdb3cf | |
chenjianping | 0cb6117709 | |
mindspore-ci-bot | 5d410342e9 | |
yoni | cd6d9131e0 | |
mindspore-ci-bot | 7607e92877 | |
mindspore-ci-bot | f2bf380b85 | |
askmiao | 4e39eab473 | |
wenfangpei | 043a558ae2 | |
xiefangqi | b9f45b49ff | |
mindspore-ci-bot | dd9f227fd6 | |
mindspore-ci-bot | e8cb23e35e | |
Eric | a3c98d9d59 | |
lzk | 5ed08ebe51 | |
mindspore-ci-bot | a7c1b6a1ef | |
mindspore-ci-bot | b22d4f99a5 | |
TFBunny | 32e86f4166 | |
zengxianglong | 4aed10f552 | |
mindspore-ci-bot | 0fb8cd888d | |
looop5 | 7c5decd880 | |
mindspore-ci-bot | c520f3deef | |
gzhcv | 95b7a6bcfb | |
yangjie159 | 72963bd2ea | |
mindspore-ci-bot | 7d5701c3e9 | |
mindspore-ci-bot | e99e98e2e0 | |
mindspore-ci-bot | 255d8e50da | |
yuchaojie | 483ab836c9 | |
mindspore-ci-bot | 103922cde5 | |
zhaodezan | b1cea1bc56 | |
mindspore-ci-bot | 64a93c2089 | |
zhaoting | 26457a8ee3 | |
mindspore-ci-bot | aede003317 | |
mindspore-ci-bot | 4a54caa721 | |
Margaret_wangrui | aeb43e5167 | |
liyong | f4ca8b6783 | |
yangwei | e34b2873fa | |
mindspore-ci-bot | 93c21f99e2 | |
zengzitao | d0a656f3cd | |
dingpeifei | 87e41aaeee | |
mindspore-ci-bot | 90d1f24b9a | |
mindspore-ci-bot | bf29da4bd5 | |
liubuyu | 23ae84a023 | |
mindspore-ci-bot | 7cd339368d | |
mindspore-ci-bot | 4d184cdbb1 | |
simson | dc8a279eb6 | |
mindspore-ci-bot | eaecc83ec2 | |
huangbo77 | 5765337238 | |
looop5 | 1e6a35cb78 | |
liuyu | 9914f37eae | |
mindspore-ci-bot | b52f0ced25 | |
mindspore-ci-bot | f921921c56 | |
mindspore-ci-bot | fb45054477 | |
zhousiyi | 3f2a08c1d0 | |
mindspore-ci-bot | f5393aaf20 | |
hedongdong | b9a773955c | |
zhuyuxiao | a11d17db1a | |
luopengting | 23ba47df6c | |
wangzhe | 70af1d1615 | |
YangLuo | f99204b292 | |
He Wei | 01eaaed85f | |
xutianchun | 344df139dc | |
yangjie159 | 08efea8791 | |
mindspore-ci-bot | 0bd1e34a4d | |
mindspore-ci-bot | 596df720af | |
mindspore-ci-bot | c75fa654a3 | |
mindspore-ci-bot | 83fd1691b1 | |
mindspore-ci-bot | 075d737127 | |
mindspore-ci-bot | d6ddd4a107 | |
mindspore-ci-bot | a5ef5e2e84 | |
mindspore-ci-bot | fd811b4dd0 | |
mindspore-ci-bot | f17689b39f | |
huangbingjian | f4ac2c7dbd | |
zhangyi | d9b152f2ef | |
mindspore-ci-bot | 83b56cac85 | |
mindspore-ci-bot | defcc51641 | |
changzherui | c87547836d | |
LianLiguang | cd7ff5e60b | |
wilfChen | a46d0c55fb | |
mindspore-ci-bot | ad1658b928 | |
lvliang | c66f21b1e8 | |
chenhaozhe | 15d37e5db9 | |
yangruoqi713 | a0e0bb5f68 | |
mindspore-ci-bot | 34b16e6a64 | |
mindspore-ci-bot | 4b319bbac7 | |
mindspore-ci-bot | 4e1e16c335 | |
fuzhiye | 6647e0e6ac | |
mindspore-ci-bot | 4a1b9e4a12 | |
hukang hwx963878 | 79aff67925 | |
mindspore-ci-bot | b45f289801 | |
mindspore-ci-bot | e8c9b52fb5 | |
z00512249 | 54708a8714 | |
mindspore-ci-bot | 1af7f0fd1e | |
mindspore-ci-bot | d51e1e0a9d | |
mindspore-ci-bot | dd90f7f055 | |
mindspore-ci-bot | 6acf4938e5 | |
mindspore-ci-bot | f446abe4ca | |
mindspore-ci-bot | 69bbf161e9 | |
mindspore-ci-bot | 06107de3f7 | |
xcnick | 3ae92a5e43 | |
Cathy Wong | 1775c4e83d | |
buxue | 30891f34cc | |
chendongsheng | 5ebd8fd391 | |
shenwei41 | 89f109a3f3 | |
xiefangqi | f460fc1cb5 | |
Xiao Tianci | 4f1dbc6cd5 | |
Jiaqi | c8e866959f | |
zhaozhenlong | 9355cd9490 | |
chenzupeng | eade4d8014 | |
He Wei | ce690a5489 | |
baihuawei | b71da51d86 | |
huangmengxi | 38b49fb30e | |
yangruoqi713 | 4e9002329f | |
zhengjun10 | 91384f6955 | |
yeyunpeng2020 | 2c8ab3f483 | |
Zhang Qinghua | 8d36b00426 | |
wang_shaocong | d3db706c6e | |
zhangzhaoju | 659f181248 | |
luopengting | c8ba7694c5 | |
changzherui | 8d907a3c71 | |
neoming | 05c0027149 | |
zhangxinfeng3 | 1029695b54 |
```diff
@@ -9,7 +9,9 @@ include(${CMAKE_SOURCE_DIR}/cmake/options.cmake)
 include(${CMAKE_SOURCE_DIR}/cmake/check_requirements.cmake)
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
 if(NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
-    add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+    if(NOT ENABLE_GLIBCXX)
+        add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+    endif()
 endif()

 if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
@@ -49,7 +51,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include/flatbuffers)

 include(${CMAKE_SOURCE_DIR}/cmake/dependency_utils.cmake)
-find_package(Python3 3.7 COMPONENTS Interpreter Development)
+find_package(Python3 COMPONENTS Interpreter Development)
 if(Python3_FOUND)
     set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}")
     set(PYTHON_LIBRARIES "${Python3_LIBRARIES}")
```
````diff
@@ -30,23 +30,24 @@ For individual contributor, please refer to [ICLA online document](https://www.m
 Please follow this style to make MindSpore easy to review, maintain and develop.

-* Coding guidelines
+- Coding guidelines

     The *Python* coding style suggested by [Python PEP 8 Coding Style](https://pep8.org/) and *C++* coding style suggested by [Google C++ Coding Guidelines](http://google.github.io/styleguide/cppguide.html) are used in MindSpore community.

-* Unittest guidelines
+- Unittest guidelines

     The *Python* unittest style suggested by [pytest](http://www.pytest.org/en/latest/) and *C++* unittest style suggested by [Googletest Primer](https://github.com/google/googletest/blob/master/docs/primer.md) are used in MindSpore community.

 ### Fork-Pull development model

-* Fork MindSpore repository
+- Fork MindSpore repository

     Before submitting code to MindSpore project, please make sure that this project have been forked to your own repository. It means that there will be parallel development between MindSpore repository and your own repository, so be careful to avoid the inconsistency between them.

-* Clone the remote repository
+- Clone the remote repository

     If you want to download the code to the local machine, `git` is the best way:

     ```shell
     # For GitHub
     git clone https://github.com/{insert_your_forked_repo}/mindspore.git
@@ -56,18 +57,20 @@ Please follow this style to make MindSpore easy to review, maintain and develop.
     git remote add upstream https://gitee.com/mindspore/mindspore.git
     ```

-* Develop code locally
+- Develop code locally

     To avoid inconsistency between multiple branches, checking out to a new branch is `SUGGESTED`:

     ```shell
     git checkout -b {new_branch_name} origin/master
     ```

     Then you can change the code arbitrarily.

-* Push the code to the remote repository
+- Push the code to the remote repository

     After updating the code, you should push the update in the formal way:

     ```shell
     git add .
     git status # Check the update status
@@ -76,7 +79,7 @@ Please follow this style to make MindSpore easy to review, maintain and develop.
     git push origin {new_branch_name}
     ```

-* Pull a request to MindSpore repository
+- Pull a request to MindSpore repository

     In the last step, your need to pull a compare request between your new branch and MindSpore `master` branch. After finishing the pull request, the Jenkins CI will be automatically set up for building test.
@@ -101,11 +104,11 @@ When reporting issues, refer to this format:
 ### Propose PRs

-* Raise your idea as an *issue* on [GitHub](https://github.com/mindspore-ai/mindspore/issues) or [Gitee](https://gitee.com/mindspore/mindspore/issues)
-* If it is a new feature that needs lots of design details, a design proposal should also be submitted.
-* After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
-* None of PRs is not permitted until it receives **2+ LGTM** from approvers. Please NOTICE that approver is NOT allowed to add *LGTM* on his own PR.
-* After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.
+- Raise your idea as an *issue* on [GitHub](https://github.com/mindspore-ai/mindspore/issues) or [Gitee](https://gitee.com/mindspore/mindspore/issues)
+- If it is a new feature that needs lots of design details, a design proposal should also be submitted.
+- After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR.
+- None of PRs is not permitted until it receives **2+ LGTM** from approvers. Please NOTICE that approver is NOT allowed to add *LGTM* on his own PR.
+- After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion.

 **PRs advisory:**
````
````diff
@@ -85,7 +85,7 @@ For installation using `pip`, take `CPU` and `Ubuntu-x86` build version as an ex
 1. Download whl from [MindSpore download page](https://www.mindspore.cn/versions/en), and install the package.

     ```bash
-    pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.1.0/MindSpore/cpu/ubuntu_x86/mindspore-1.1.0-cp37-cp37m-linux_x86_64.whl
+    pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.0-rc1/MindSpore/cpu/ubuntu_x86/mindspore-1.2.0rc1-cp37-cp37m-linux_x86_64.whl
     ```

 2. Run the following command to verify the install.
````
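The verification step referenced in the hunk is not shown here; a minimal sanity check along those lines — the exact command in the README may differ — might be:

```python
import numpy as np
import mindspore
from mindspore import Tensor, context

context.set_context(device_target="CPU")
print(mindspore.__version__)                    # expect 1.2.0rc1 after the upgrade
x = Tensor(np.ones([1, 3]), mindspore.float32)
print((x + x).asnumpy())                        # [[2. 2. 2.]] confirms a working install
```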
````diff
@@ -82,7 +82,7 @@ MindSpore offers build options across multiple backends:
 1. Download and install the whl package from the [MindSpore download page](https://www.mindspore.cn/versions).

     ```bash
-    pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.1.0/MindSpore/cpu/ubuntu_x86/mindspore-1.1.0-cp37-cp37m-linux_x86_64.whl
+    pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.0-rc1/MindSpore/cpu/ubuntu_x86/mindspore-1.2.0rc1-cp37-cp37m-linux_x86_64.whl
     ```

 2. Run the following command to verify the installation.
````
RELEASE.md — 669 lines changed

```diff
@@ -1,6 +1,68 @@
-# MindSpore 1.2.0 Release Notes
+# MindSpore 1.2.0

-## MindSpore
+## MindSpore 1.2.0 Release Notes
```

### Major Features and Improvements

#### NewModels

- [STABLE] Add CV models on Ascend: 3D Unet, Unet++, SSD-Resnet50-fpn, SSD-VGG16, crnn_seq2seq_ocr for BSI, CTPN, resnet18, DPN
- [STABLE] Add CV models on GPU: Faster-RCNN
- [STABLE] Add NLP models on Ascend: NAML, Fasttext, GRU, LSTM
- [BETA] Add TPRR: Thinking Path Re-Ranker, an original ranking-based framework for Multi-Hop Question Answering, which won first place on the HotpotQA leaderboard. (Ascend)
#### FrontEnd

- [STABLE] Support side-effect expressions to ensure that the execution order of the user's semantics is correct. (Ascend/GPU/CPU)
- [STABLE] Support calculating the gradient for networks that contain non-Tensor input parameters (int, float, bool, mstype.int, mstype.float, mstype.uint, mstype.bool_, tuple, list, dict). (Ascend/GPU/CPU)
- [STABLE] Support the inverse of a bool Tensor. (Ascend/GPU/CPU)
- [STABLE] Unify the interface `isinstance`. (Ascend/GPU/CPU)
- [STABLE] Support negative indexes. (Ascend/GPU/CPU)
- [STABLE] Support 110+ NumPy-like interfaces in mindspore.numpy; see the sketch after this list. (Ascend/GPU/CPU)
- [STABLE] Support export/load of MindIR models with a size greater than 2 GB.
- [STABLE] The optimizer supports gradient centralization. (Ascend)
- [STABLE] Support auc metric, roc metric, bleu score metric, confusion matrix metric, cosine similarity metric, dice metric, hausdorff distance metric, occlusion sensitivity metric, perplexity metric, mean surface distance metric, and root mean surface distance metric.
- [STABLE] Support using EmbeddingLookup with cache. (Ascend)
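A minimal sketch of the front-end additions above, assuming a 1.2.0 install with a CPU backend (the specific values are illustrative):

```python
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore import Tensor, context

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

x = mnp.arange(6.0)                  # one of the NumPy-like interfaces
mask = Tensor([True, False, True])
inverted = ops.LogicalNot()(mask)    # inverse of a bool Tensor
last = x[-1]                         # negative index support
print(inverted, last)
```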
#### Auto Parallel

- [STABLE] Support AllGather and ReduceScatter fusion. (Ascend)
- [STABLE] Support the gradient accumulation feature in auto parallel mode. (Ascend/GPU)
- [STABLE] Support running the parallel optimizer with gradient accumulation. (Ascend)
- [STABLE] Add configuration of communication operators' fusion; see the sketch after this list. (Ascend)
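Illustrative only: a sketch of how these switches are typically wired up. An 8-device job is assumed, and the fusion indices are made up; `enable_parallel_optimizer` and `all_reduce_fusion_config` are existing `set_auto_parallel_context` options:

```python
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.communication.management import init

init()  # initialize the collective communication backend first
context.set_auto_parallel_context(
    parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
    device_num=8,
    enable_parallel_optimizer=True,      # run the parallel optimizer
    all_reduce_fusion_config=[20, 35],   # fuse gradient AllReduce at these layer indices
)
```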
#### Executor

- [STABLE] Support inference with Nvidia GPU.
- [STABLE] Support data parallelism in PyNative mode. (Ascend/GPU)
- [STABLE] Optimize LSTM inference memory consumption in Graph mode with CPU.

#### Sponge

- [STABLE] Add SPONGE modules for molecular dynamics simulation, including Bond, Angle, Dihedral, Non Bond 14, NeighborList, Particle Mesh Ewald, Langevin MD and LIUJIAN MD. (GPU)
#### DataSet

- [STABLE] If the libnuma library is installed in the environment, you can run `export DATASET_ENABLE_NUMA=True` to configure NUMA binding (see the sketch after this list). In multi-card training scenarios, data processing speed can be improved, thereby improving network training efficiency.
- [STABLE] Unify the API Tensor structure of the training/inference interfaces in the C++ SDK.
- [STABLE] Optimize duplicated Decode in data preprocessing using cache, improving preprocessing efficiency.
- [STABLE] Support eager mode to run data augmentation in Python & C++.
- [STABLE] Support more data augmentation operators (e.g. Affine, Perspective) in MindSpore Lite.
- [STABLE] Support a light pipeline to process MindData in MindSpore Lite training.
- [STABLE] Support more data preprocessing operators based on the DVPP hardware module, usable on the Ascend310 platform.
- [STABLE] Support copy-free property for data in Ascend310 inference scenarios.
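A sketch of the NUMA switch in use; the dataset path and transform list are hypothetical:

```python
import os

# Must be set before the dataset pipeline is created; requires libnuma.
os.environ["DATASET_ENABLE_NUMA"] = "True"

import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as C

data = ds.ImageFolderDataset("/path/to/train", num_parallel_workers=8)
data = data.map(operations=[C.Decode(), C.Resize((224, 224))],
                input_columns="image", num_parallel_workers=8)
data = data.batch(32, drop_remainder=True)
```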
#### Running Data Recorder

- [STABLE] Support the running data recorder (RDR) for exception demarcation; see the sketch after this list.
- [STABLE] Provide records of multi-stage computational graphs, memory allocation information, graph execution order, stream execution order and task debug information when a "run task error" or "distribute task failed" occurs. (Ascend)
- [STABLE] Provide records of multi-stage computational graphs, memory allocation information and graph execution order when a "SyncStream error" occurs. (GPU)
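RDR is switched on through environment variables. The names below follow the MindSpore debugging docs, but treat both the variable names and the dump path as assumptions to verify against your version:

```python
import os

# Assumed RDR switches; verify the variable names for your MindSpore version.
os.environ["MS_RDR_ENABLE"] = "1"            # enable the running data recorder
os.environ["MS_RDR_PATH"] = "/tmp/rdr_dump"  # directory for records written on failure
```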
#### 3D Feature

- [STABLE] Support 3D ops: Conv3D, Conv3DBackpropInput, Conv3DBackpropFilter, Conv3DTranspose, BiasAdd, BiasAddGrad, PReLU, Transpose, Reshape, transdata, StrideSlice, MaxPool3D, MaxPool3DGrad, BinaryCrossEntropy, SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsGrad, SoftmaxCrossEntropyWithLogits, BatchNorm3d, BatchNorm3dGrad, Dropout3d. A Conv3D sketch follows this list.
- [STABLE] Support the RMSELoss, MAELoss and FocalLoss loss functions, the DiceLoss binary loss function, and the MultiClassDiceLoss multi-class loss function for 2D/3D networks.
- [STABLE] Add optimizers: AdamApplyOne (3D), ApplyMomentum (3D), SGD (3D).
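A minimal sketch of the new Conv3D primitive (NCDHW layout; the channel counts and spatial sizes are arbitrary):

```python
import numpy as np
import mindspore.ops as ops
from mindspore import Tensor

conv3d = ops.Conv3D(out_channel=8, kernel_size=(3, 3, 3))
x = Tensor(np.ones((1, 3, 16, 32, 32), np.float32))  # N, C, D, H, W
w = Tensor(np.ones((8, 3, 3, 3, 3), np.float32))     # out_C, in_C, kD, kH, kW
y = conv3d(x, w)
print(y.shape)
```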
### API Change

@@ -8,6 +70,79 @@

##### Python API

###### `mindspore.numpy.array()`, `mindspore.numpy.asarray()`, `mindspore.numpy.asfarray()`, `mindspore.numpy.copy()` now support GRAPH mode, but cannot accept `numpy.ndarray` as input arguments anymore ([!12726](https://gitee.com/mindspore/mindspore/pulls/12726))

Previously, these interfaces could accept `numpy.ndarray` as arguments and convert it to a Tensor, but they could not be used in GRAPH mode. Currently, the MindSpore parser cannot parse `numpy.ndarray` in a JIT graph, so to support these interfaces in graph mode the `numpy.ndarray` support had to be removed. That said, users can still use `Tensor` to convert a `numpy.ndarray` to a tensor.
<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```python
>>> import mindspore.numpy as mnp
>>> import numpy
>>>
>>> nd_array = numpy.array([1,2,3])
>>> tensor = mnp.asarray(nd_array) # this line cannot be parsed in GRAPH mode
```

</td>
<td>

```python
>>> import mindspore.numpy as mnp
>>> import numpy
>>>
>>> tensor = mnp.asarray([1,2,3]) # this line can be parsed in GRAPH mode
```

</td>
</tr>
</table>
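In graph mode, the new pattern looks like this end to end: convert any `numpy.ndarray` to `Tensor` outside the compiled graph, and keep `mindspore.numpy` calls inside it to literals. A minimal sketch (the `AddConst` cell and its values are made up for illustration):

```python
import numpy as np
import mindspore.numpy as mnp
import mindspore.nn as nn
from mindspore import Tensor, context

context.set_context(mode=context.GRAPH_MODE)

class AddConst(nn.Cell):
    def construct(self, x):
        ones = mnp.asarray([1.0, 2.0, 3.0])  # literal input: parseable in GRAPH mode
        return x + ones

nd = np.array([4.0, 5.0, 6.0], np.float32)
out = AddConst()(Tensor(nd))                 # ndarray converted outside the graph
```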
###### mindspore.numpy interfaces remove support for keyword arguments `out` and `where` ([!12726](https://gitee.com/mindspore/mindspore/pulls/12726))

Previously, we had incomplete support for the keyword arguments `out` and `where` in mindspore.numpy interfaces: the `out` argument was only functional when `where` was also provided, and `out` could not be used to pass a reference to numpy functions. Therefore, these two arguments have been removed to avoid any confusion. Their original functionality can be reproduced with [np.where](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/numpy/mindspore.numpy.where.html#mindspore.numpy.where).

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```python
>>> import mindspore.numpy as np
>>>
>>> a = np.ones((3,3))
>>> b = np.ones((3,3))
>>> out = np.zeros((3,3))
>>> where = np.asarray([[True, False, True],[False, False, True],[True, True, True]])
>>> res = np.add(a, b, out=out, where=where) # `out` cannot be used as a reference, therefore it is misleading
```

</td>
<td>

```python
>>> import mindspore.numpy as np
>>>
>>> a = np.ones((3,3))
>>> b = np.ones((3,3))
>>> out = np.zeros((3,3))
>>> where = np.asarray([[True, False, True],[False, False, True],[True, True, True]])
>>> res = np.add(a, b)
>>> out = np.where(where, x=res, y=out) # instead of np.add(a, b, out=out, where=where)
```

</td>
</tr>
</table>
###### Turn `ops.MakeRefKey` into an internal interface ([!12010](https://gitee.com/mindspore/mindspore/pulls/12010))

Previously, `MakeRefKey` was an external interface that was not used; it is now an internal interface with the same usage. We do not recommend using this interface, and its description will be removed from the official website.

@@ -16,6 +151,534 @@ Previously MakeRefKey is an external interface that is not used, now make it an

Previously, the number of outputs of these operators differed across backends. To unify their definition, their output on the Ascend backend was changed from multiple outputs to a single one.

###### `P.FusedBatchNorm`, `P.FusedBatchNormEx` deleted ([!12115](https://gitee.com/mindspore/mindspore/pulls/12115))

The `FusedBatchNorm` and `FusedBatchNormEx` interfaces have been deleted. Please use the BatchNorm operator to replace them.

###### `MetaTensor` deleted ([!10325](https://gitee.com/mindspore/mindspore/pulls/10325))

The `MetaTensor` interface has been deleted. Its functionality has been integrated into `Tensor`.

###### `ControlDepend` is deleted, use `Depend` instead. The decorator `@C.add_flags(has_effect=True)` does not work. ([!13793](https://gitee.com/mindspore/mindspore/pulls/13793))

Previously, we used `ControlDepend` to control the execution order of multiple operators. Version 1.2.0 introduces the auto-monad side-effect expression to ensure that the execution order of the user's semantics is correct, so `ControlDepend` is deleted and `Depend` is recommended.

In most scenarios, if operators have IO side effects (such as print) or memory side effects (such as assign), they will be executed according to the user's semantics. In some scenarios, if two operators A and B have no order dependency but A must be executed before B, we recommend using `Depend` to specify their execution order; a runnable sketch follows the table below. See the API documentation of the `Depend` operator for details.
<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```python
In some side-effect scenarios, we need to ensure the execution order of operators.
In order to ensure that operator A is executed before operator B, it is recommended
to insert the Depend operator between operators A and B.

Previously, the ControlDepend operator was used to control the execution order.
Since the ControlDepend operator is deprecated from version 1.1, it is recommended
to use the Depend operator instead. The replacement method is as follows::

    a = A(x)             --->  a = A(x)
    b = B(y)             --->  y = Depend(y, a)
    ControlDepend(a, b)  --->  b = B(y)
```

</td>
<td>

```python
In most scenarios, if operators have IO side effects or memory side effects,
they will be executed according to the user's semantics. In some scenarios,
if the two operators A and B have no order dependency, and A must be executed
before B, we recommend using Depend to specify their execution order. The
usage method is as follows::

    a = A(x)  --->  a = A(x)
    b = B(y)  --->  y = Depend(y, a)
              --->  b = B(y)
```

</td>
</tr>
</table>
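A runnable sketch of the `Depend` pattern; the `AssignAdd`/`ReLU` pair is an arbitrary stand-in for operators A and B:

```python
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, Parameter, context

context.set_context(mode=context.GRAPH_MODE)

class Net(nn.Cell):
    def __init__(self):
        super().__init__()
        self.param = Parameter(Tensor(np.zeros((2,), np.float32)), name="p")
        self.assign_add = ops.AssignAdd()
        self.depend = ops.Depend()
        self.relu = ops.ReLU()

    def construct(self, x):
        a = self.assign_add(self.param, x)  # operator A: memory side effect
        p = self.depend(self.param, a)      # the read of param now depends on A
        return self.relu(p)                 # operator B runs after A

print(Net()(Tensor(np.ones((2,), np.float32))))  # [1. 1.]
```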
After the introduction of the auto-monad side-effect expression feature, the decorator `@C.add_flags(has_effect=True)` no longer works. If the decorator is used in a script, please modify it. Taking the overflow identification code (without side effects) as an example, the modification method is as follows:

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```python
@C.add_flags(has_effect=True)
def construct(self, *inputs):
    ...
    loss = self.network(*inputs)
    init = self.allo_status()
    self.clear_status(init)
    ...
```

</td>
<td>

```python
def construct(self, *inputs):
    ...
    loss = self.network(*inputs)
    init = self.allo_status()
    init = F.depend(init, loss)
    clear_status = self.clear_status(init)
    ...
```

</td>
</tr>
</table>
##### C++ API

###### The C++ API supports dual ABI now ([!12432](https://gitee.com/mindspore/mindspore/pulls/12432))

1.1.1 supported only the old ABI. Currently, both the new and the old ABI are supported.

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cmake
add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
```

</td>
<td>

```cmake
add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0) # the old ABI is supported
add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=1) # the new ABI is supported, too
# write nothing, use new ABI as default
```

</td>
</tr>
</table>
###### Context refactor ([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

The `Context` class is refactored. For details, see the API docs.

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cpp
GlobalContext::SetGlobalDeviceTarget(kDeviceTypeAscend310);        // set device target to Ascend310
GlobalContext::SetGlobalDeviceID(0);                               // set device id to 0
auto model_context = std::make_shared<ModelContext>();             // create a model context
ModelContext::SetInsertOpConfigPath(model_context, "./aipp.cfg");  // set AIPP config file to ./aipp.cfg
```

</td>
<td>

```cpp
auto model_context = std::make_shared<Context>();              // create a model context
auto ascend310_info = std::make_shared<Ascend310DeviceInfo>();
model_context->MutableDeviceInfo().push_back(ascend310_info);  // set device target to Ascend310
ascend310_info->SetDeviceID(0);                                // set device id to 0
ascend310_info->SetInsertOpConfigPath("./aipp.cfg");           // set AIPP config file to ./aipp.cfg
```

</td>
</tr>
</table>
###### LoadModel interface changes ([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

`LoadModel` is renamed `Load`. No exception is thrown now; instead, the returned status should be checked.

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cpp
try {
  auto graph = Serialization::LoadModel(model_file_path, kMindIR);
} catch (...) { ... }
```

</td>
<td>

```cpp
Graph graph;
auto ret = Serialization::Load(model_file_path, kMindIR, &graph);
if (ret != kSuccess) { ... }
```

</td>
</tr>
</table>
###### Model ctor changes ([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

`Model` now uses a parameterless constructor; the arguments are passed in through `Build`.

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cpp
Model net(net_cell, model_context);
auto ret = net.Build();
if (ret != kSuccess) { ... }
```

</td>
<td>

```cpp
Model net;
auto ret = net.Build(net_cell, model_context);
if (ret != kSuccess) { ... }
```

</td>
</tr>
</table>
###### MSTensor::CreateTensor returns a native pointer now.([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))
|
||||
|
||||
`MSTensor::CreateTensor` and `MSTensor::CreateRefTensor` returns a native pointer now, need to be destroy by `DestroyTensorPtr`.
|
||||
|
||||
<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cpp
auto tensor = MSTensor::CreateTensor(xxx, xxx, ...);
auto name = tensor.Name();
```

</td>
<td>

```cpp
auto tensor = MSTensor::CreateTensor(xxx, xxx, ...);
auto name = tensor->Name();
MSTensor::DestroyTensorPtr(tensor);
```

</td>
</tr>
</table>
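
Because the snippet above elides the arguments as `xxx`, here is a hedged, self-contained sketch of the new ownership rule; the tensor name, type, shape, and header path are illustrative assumptions:

```cpp
#include <vector>
#include "include/api/types.h"

// Sketch only: create, use through the pointer, then free explicitly.
void CreateUseDestroy() {
  std::vector<float> buffer(2 * 3, 1.0f);  // illustrative payload
  auto *tensor = mindspore::MSTensor::CreateTensor(
      "input0", mindspore::DataType::kNumberTypeFloat32, {2, 3},
      buffer.data(), buffer.size() * sizeof(float));
  if (tensor == nullptr) {
    return;  // creation can fail, so the pointer must be checked
  }
  auto name = tensor->Name();  // access goes through the native pointer now
  (void)name;
  mindspore::MSTensor::DestroyTensorPtr(tensor);  // the caller frees it
}
```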

#### New features

##### Python API

- Add SPONGE functions: `mindspore.ops.operations.BondForceWithAtomEnergy`, `mindspore.ops.operations.AngleForceWithAtomEnergy`, `mindspore.ops.operations.DihedralForceWithAtomEnergy`, `mindspore.ops.operations.Dihedral14LJCFForceWithAtomEnergy`, `mindspore.ops.operations.LJForceWithPMEDirectForce`, `mindspore.ops.operations.PMEExcludedForce`, `mindspore.ops.operations.PMEReciprocalForce`, `mindspore.ops.operations.BondEnergy`, `mindspore.ops.operations.AngleEnergy`, `mindspore.ops.operations.DihedralEnergy`, `mindspore.ops.operations.Dihedral14LJEnergy`, `mindspore.ops.operations.Dihedral14CFEnergy`, `mindspore.ops.operations.LJEnergy`, `mindspore.ops.operations.PMEEnergy`. All operators are supported on `GPU`.

#### Deprecations

##### Python API

###### `nn.MatMul` is now deprecated in favor of `ops.matmul` ([!12817](https://gitee.com/mindspore/mindspore/pulls/12817))

[ops.matmul](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/ops/mindspore.ops.matmul.html#mindspore.ops.matmul) follows the API of [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html) as closely as possible. As a function interface, [ops.matmul](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/ops/mindspore.ops.matmul.html#mindspore.ops.matmul) is applied without instantiation, as opposed to `nn.MatMul`, which should only be used as a class instance.

<table>
<tr>
<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```python
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>>
>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
>>> y = Tensor(np.ones((3, 4)).astype(np.float32))
>>> nn.MatMul()(x, y)
```

</td>
<td>

```python
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>>
>>> x = Tensor(np.ones((2, 3)).astype(np.float32))
>>> y = Tensor(np.ones((3, 4)).astype(np.float32))
>>> ops.matmul(x, y)
```

</td>
</tr>
</table>

### Bug fixes

#### FrontEnd

- Fix the null-pointer problem of the evaluator in control flow. ([!13312](https://gitee.com/mindspore/mindspore/pulls/13312))
- Fix the parameter-naming conflict bug for CellList and SequentialCell. ([!13260](https://gitee.com/mindspore/mindspore/pulls/13260))

#### Executor

- Fix executor pending tasks not being executed in some heterogeneous cases. ([!13465](https://gitee.com/mindspore/mindspore/pulls/13465))
- Add passes to support frontend IR unification, including the following operations: SliceGrad ([!11783](https://gitee.com/mindspore/mindspore/pulls/11783)), ApplyFtrl, ApplyMomentum, ApplyRMSProp, CenteredRMSProp ([!11895](https://gitee.com/mindspore/mindspore/pulls/11895)), AvgPoolGrad ([!12813](https://gitee.com/mindspore/mindspore/pulls/12813)), BatchNorm ([!12115](https://gitee.com/mindspore/mindspore/pulls/12115))

#### Dataset

- Fix getter functions (e.g. GetDatasetSize) terminating abnormally when using Python multiprocessing. ([!13571](https://gitee.com/mindspore/mindspore/pulls/13571), [!13823](https://gitee.com/mindspore/mindspore/pulls/13823))
- Fix unclear error logs of data augmentation operators. ([!12398](https://gitee.com/mindspore/mindspore/pulls/12398), [!12883](https://gitee.com/mindspore/mindspore/pulls/12883), [!13176](https://gitee.com/mindspore/mindspore/pulls/13176))
- Fix profiling behaving abnormally when sink_size = False, because data saving finished later than the profiling analysis. ([!13944](https://gitee.com/mindspore/mindspore/pulls/13944))

## MindSpore Lite

### Major Features and Improvements

#### Converter and runtime

1. Support TensorFlow models in Converter, except aware-training models.
2. Add fusion patterns for identical horizontal operators in Converter.
3. Provide a JAR for x86_64 systems so that MindSpore Lite can be conveniently integrated into servers with a Java backend.
4. Provide a unified runtime API so that developers can reuse their code between the cloud side and the device side. [BETA]
5. Continue to improve control-flow capabilities: support GRU fusion in Converter, weight quantization for control-flow models, control-flow model inference with half precision, and nested control-flow models. [BETA]

#### ARM backend optimization

1. Add float16 operators needed by NLP models (such as LSTM) to enhance inference performance.
2. Optimize operators: LSTM, GRU, and depthwise convolution.
3. Add six NPU operators (such as FullConnection) and fix several bugs where buildIR failed.

#### OpenCL backend

1. Add more than 10 new ops, for a total of 72 ops.
2. Performance optimization: through memory-layout optimization and block tiling, performance improved by 30% over version 1.1 on Adreno GPUs.
3. Initialization-time optimization: initialization time improved by 100% over MindSpore Lite 1.1 by storing the kernel cache as binary.
4. Support Java calls on Mali and Adreno GPUs.

#### Post quantization

1. Support quantization of the gather and LSTM ops.
2. Support quantizing TF Lite models that contain sub-graph nodes.
3. Add a quantization strategy that decides whether or not to quantize each op, giving less accuracy loss and a higher compression rate.

#### Training on Device

1. Virtual batching: use mini-batches to mimic large batches in theory, with little RAM consumption.
2. Unified converter: the ToD and IoD converters are no longer compiled separately.
3. Performance optimization of backward (BWD) ops.
4. TrainLoop with off-the-shelf functionality blocks, such as an LR scheduler, loss monitor, checkpoint saver, and accuracy monitor.
5. Integration of the code with MindData Lite.
6. Support more networks (GoogLeNet, DenseNet, ShuffleNetV2, NiN, VGG) and operators.

#### Codegen

1. Support 79 ops for the ARM platform and all CMSIS ops for the Arm Cortex-M series.
2. Multi-platform support, including Android and IoT devices.
3. Support offline preprocessing of model weights while compiling.
4. Support offline memory-reuse computation to minimize the runtime buffer size.

### API Change

#### API Incompatible Change

##### C++ API

###### Add header file named lite_types.h for some common data structs. ([!12262](https://gitee.com/mindspore/mindspore/pulls/12262))

Previously, some common data structs such as `CpuBindMode` and `DeviceType` lived in context.h, which could cause cross-dependencies between headers. So we created a new header named lite_types.h and moved `CpuBindMode` and `DeviceType` from context.h into it.

<table>
<tr>
<td style="text-align:center"> lite_types.h </td>
</tr>
<tr>
<td>

```cpp
namespace mindspore::lite {
/// \brief CpuBindMode defined for holding bind cpu strategy argument.
typedef enum {
  NO_BIND,    /**< no bind */
  HIGHER_CPU, /**< bind higher cpu first */
  MID_CPU     /**< bind middle cpu first */
} CpuBindMode;

/// \brief DeviceType defined for holding user's preferred backend.
typedef enum {
  DT_CPU, /**< CPU device type */
  DT_GPU, /**< GPU device type */
  DT_NPU  /**< NPU device type */
} DeviceType;
}  // namespace mindspore::lite
```

</td>
</tr>
</table>
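
For callers, the change should only move an include around; a minimal sketch, assuming the header is shipped as include/lite_types.h:

```cpp
#include "include/lite_types.h"  // previously these enums came from context.h

// The enums themselves are unchanged; only their home header moved.
mindspore::lite::CpuBindMode bind_mode = mindspore::lite::MID_CPU;
mindspore::lite::DeviceType device_type = mindspore::lite::DT_CPU;
```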

###### Add some new interfaces in ms_tensor.h for unified runtime API.([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

Previously, users could not create or modify an `MSTensor`; all `MSTensor` objects were created and managed by the framework. However, users sometimes need to create or modify an `MSTensor`, for example when pre-processing input data. So we provide two new interfaces in ms_tensor.h: the `CreateTensor` interface for creating an `MSTensor` and the `set_shape` interface for modifying the shape of an `MSTensor`.

<table>
<tr>
<td style="text-align:center"> CreateTensor </td>
</tr>
<tr>
<td>

```cpp
/// \brief Create a MSTensor.
///
/// \return Pointer to an instance of MindSpore Lite MSTensor.
static MSTensor *CreateTensor(const std::string &name, TypeId type, const std::vector<int> &shape, const void *data,
                              size_t data_len);
```

</td>
</tr>
</table>

<table>
<tr>
<td style="text-align:center"> set_shape </td>
</tr>
<tr>
<td>

```cpp
/// \brief Set the shape of MSTensor.
virtual void set_shape(const std::vector<int> &shape) = 0;
```

</td>
</tr>
</table>
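
Taken together, the two interfaces let user code build and reshape its own input tensors, e.g. during pre-processing. A hedged sketch; the names, the header path, and the resize step are illustrative only:

```cpp
#include <vector>
#include "include/ms_tensor.h"

// Sketch: a user-created tensor for pre-processed input data.
mindspore::tensor::MSTensor *PrepareInput(const std::vector<float> &pixels) {
  auto *input = mindspore::tensor::MSTensor::CreateTensor(
      "image", mindspore::kNumberTypeFloat32, {1, 224, 224, 3},
      pixels.data(), pixels.size() * sizeof(float));
  if (input != nullptr) {
    input->set_shape({1, 112, 112, 3});  // e.g. after a hypothetical crop step
  }
  return input;
}
```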

Previously, users could access the data of an `MSTensor` through the interface named `MutableData`. However, `MutableData` not only returns the tensor's data but also allocates data for the tensor if its data is nullptr. So we provide a new interface in ms_tensor.h named `data` that returns the tensor's data without allocating automatically.

<table>
<tr>
<td style="text-align:center"> data </td>
</tr>
<tr>
<td>

```cpp
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be
/// allocated.
///
/// \return the pointer points to data in MSTensor.
virtual void *data() = 0;
```

</td>
</tr>
</table>
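
The practical difference shows up on a tensor whose buffer has not been allocated yet; a minimal sketch:

```cpp
#include "include/ms_tensor.h"

// Sketch: data() never allocates; MutableData() may.
void *ReadOnlyView(mindspore::tensor::MSTensor *tensor) {
  void *raw = tensor->data();  // nullptr if no buffer was ever allocated
  if (raw == nullptr) {
    // Allocation is now an explicit choice left to the caller:
    raw = tensor->MutableData();
  }
  return raw;
}
```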

###### Delete `DimensionSize()` in ms_tensor.h.([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

The interface named `DimensionSize` is functionally overlapped by the interface named `shape`. For the simplicity of the interface, we deleted `DimensionSize` and recommend that users use the new interface named `shape` instead.

<table>
<tr>
<td style="text-align:center"> DimensionSize() </td>
</tr>
<tr>
<td>

```cpp
/// \brief Get size of the dimension of the MindSpore Lite MSTensor index by the parameter index.
///
/// \param[in] index Define index of dimension returned.
///
/// \return Size of dimension of the MindSpore Lite MSTensor.
virtual int DimensionSize(size_t index) const = 0;
```

</td>
</tr>
</table>
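
Migration is mechanical: index into the vector returned by `shape` instead of calling `DimensionSize`. A small sketch:

```cpp
#include "include/ms_tensor.h"

// Sketch: 1.1.x used tensor.DimensionSize(1); 1.2.0 indexes shape().
int HeightOf(const mindspore::tensor::MSTensor &tensor) {
  auto shape = tensor.shape();  // e.g. {N, H, W, C}
  return shape.size() > 1 ? shape[1] : -1;
}
```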

###### Move Allocator from namespace mindspore::lite to namespace mindspore for unified runtime API.([!13515](https://gitee.com/mindspore/mindspore/pulls/13515))

Previously, the class `Allocator` was in namespace mindspore::lite. Considering a unified allocator interface for the unified runtime API, we moved `Allocator` to namespace mindspore.

<table>
<tr>
<td style="text-align:center"> 1.1.0 </td> <td style="text-align:center"> 1.2.0 </td>
</tr>
<tr>
<td>

```cpp
namespace mindspore::lite {
/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically.
///
/// \note List public class and interface for reference.
class Allocator;
}
```

</td>
<td>

```cpp
namespace mindspore {
/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically.
///
/// \note List public class and interface for reference.
class Allocator;
}
```

</td>
</tr>
</table>

### Bug fixes

1. Fix the bug that the array in the kernel registrar was not initialized.
2. Fix a segmentation fault caused by mistakenly releasing OpParameter in the Crop kernel.
3. Fix the bug that a MINDIR aware-training model was ultimately interpreted as a weight-quant model.

## Contributors

Thanks goes to these wonderful people:

Adel, AGroupofProbiotocs, anthonyaje, anzhengqi, askmiao, baihuawei, baiyangfan, bai-yangfan, bingyaweng, BowenK, buxue, caifubi, CaoJian, caojian05, caozhou, Cathy, changzherui, chenbo116, chenfei, chengxianbin, chenhaozhe, chenjianping, chenzomi, chenzupeng, chujinjin, cj, cjh9368, Corleone, damon0626, danish, Danish, davidmc, dayschan, doitH, dong-li001, eric, Eric, fary86, fuzhiye, Gaoxiong, GAO_HYP_XYJ, gengdongjie, Gogery, gongdaguo, gray0v0, gukecai, guoqi, gzhcv, hangq, hanhuifeng2020, Harshvardhan, He, heleiwang, hexia, Hoai, HuangBingjian, huangdongrun, huanghui, huangxinjing, huqi, huzhifeng, hwjiaorui, Islam Amin, Jesse, Jiabin Liu, jianghui58, jiangzhiwen, Jiaqi, jin-xiulang, jinyaohui, jjfeing, John, Jonathan, jonyguo, JulyAi, jzg, kai00, kingfo, kingxian, kpy, kswang, laiyongqiang, leonwanghui, Li, liangchenghui, liangzelang, lichen_101010, lichenever, lihongkang, lilei, limingqi107, ling, linqingke, Lin Xh, liubuyu, liuwenhao4, liuxiao78, liuxiao93, liuyang_655, liuzhongkai, Lixia, lixian, liyanliu, liyong, lizhenyu, luopengting, luoyang, lvchangquan, lvliang, lz, mahdi, Mahdi, maning202007, Margaret_wangrui, mayang, mengyuanli, Ming_blue, nhussain, ougongchang, panfengfeng, panyifeng, Payne, Peilin, peixu_ren, Pengyongrong, qianlong, qianjiahong, r1chardf1d0, riemann_penn, rmdyh, Sheng, shenwei41, simson, Simson, Su, sunsuodong, tao_yunhao, tinazhang, VectorSL, Wan, wandongdong, wangdongxu, wangmin, wangnan39@huawei.com, wangyue01, wangzhe, wanyiming, Wei, wenchunjiang, wilfChen, WilliamLian, wsc, wudenggang, wukesong, wuweikang, wuxuejian, Xiaoda, xiefangqi, xinyunfan, xuanyue, xulei2020, Xun, xuyongfei, yanghaitao, yanghaitao1, yanghaoran, YangLuo, yangruoqi713, yankai, yanzhenxiang2020, yao_yf, yepei6, yeyunpeng, Yi, yoni, yoonlee666, yuchaojie, yujianfeng, yuximiao, zengzitao, Zhang, zhanghaibo5@huawei.com, zhanghuiyao, zhanghui_china, zhangxinfeng3, zhangyihui, zhangz0911gm, zhanke, zhanyuan, zhaodezan, zhaojichen, zhaoting, zhaozhenlong, zhengjun10, zhiqwang, zhoufeng, zhousiyi, zhouyaqiang, zhouyifengCode, Zichun, Zirui, Ziyan, zjun, ZPaC, zymaa.

Contributions of any kind are welcome!

# MindSpore 1.1.1 Release Notes

## MindSpore

@ -295,7 +958,7 @@ Examples:
...         self.depend = P.Depend()
...
...     def construct(self, x, y):
...         mul = x * y
...         mul = x - y
...         y = self.depend(y, mul)
...         ret = self.softmax(y)
...         return ret

2 akg

@ -1 +1 @@
Subproject commit e2a0a264a0be549b51a035e5f783927052f8ead8
Subproject commit a5a856cd2ccabd896be3ce44544eea26bf90e764

12 build.bat

@ -26,16 +26,12 @@ set VERSION_MAJOR=''
set VERSION_MINOR=''
set VERSION_REVISION=''

find "const int ms_version_major =" mindspore\lite\include\version.h > version.txt
for /f "delims=\= tokens=2" %%a in ('findstr "const int ms_version_major = " version.txt') do (set x=%%a)
for /f "delims=\= tokens=2" %%a in ('findstr /C:"const int ms_version_major = " mindspore\lite\include\version.h') do (set x=%%a)
set VERSION_MAJOR=%x:~1,1%
find "const int ms_version_minor =" mindspore\lite\include\version.h > version.txt
for /f "delims=\= tokens=2" %%b in ('findstr "const int ms_version_minor = " version.txt') do (set y=%%b)
for /f "delims=\= tokens=2" %%b in ('findstr /C:"const int ms_version_minor = " mindspore\lite\include\version.h') do (set y=%%b)
set VERSION_MINOR=%y:~1,1%
find "const int ms_version_revision =" mindspore\lite\include\version.h > version.txt
for /f "delims=\= tokens=2" %%c in ('findstr "const int ms_version_revision = " version.txt') do (set z=%%c)
for /f "delims=\= tokens=2" %%c in ('findstr /C:"const int ms_version_revision = " mindspore\lite\include\version.h') do (set z=%%c)
set VERSION_REVISION=%z:~1,1%
del version.txt

echo "======Start building MindSpore Lite %VERSION_MAJOR%.%VERSION_MINOR%.%VERSION_REVISION%======"

@ -78,6 +74,8 @@ IF NOT EXIST "%BUILD_PATH%/mindspore" (

cd %BUILD_PATH%/mindspore
IF "%1%" == "lite" (
cmake --build "%BUILD_PATH%\mindspore" --target clean
rd /s /q "%BASE_PATH%\output"
(git log -1 | findstr "^commit") > %BUILD_PATH%\.commit_id
cmake -DPLATFORM_ARM64=off -DSUPPORT_TRAIN=off ^
-DENABLE_TOOLS=on -DENABLE_CONVERTER=on -DBUILD_TESTCASES=off ^

135 build.sh

@ -536,27 +536,16 @@ write_commit_file() {
echo ${COMMIT_STR} > "${BASEPATH}/mindspore/lite/build/.commit_id"
}

gen_fbs() {
if [[ "${ENABLE_TOOLS}" == "on" ]]; then
if [[ -f ${BASEPATH}/mindspore/lite/build/tools/schema_gen/schema_gen ]]; then
cd ${BASEPATH}/mindspore/lite/build/tools/schema_gen
./schema_gen
cd -
diff_ops=$(diff ${BASEPATH}/mindspore/lite/build/tools/schema_gen/ops.fbs ${BASEPATH}/mindspore/lite/schema/ops.fbs || true)
if [[ "X${diff_ops}" != "X" ]]; then
cp ${BASEPATH}/mindspore/lite/build/tools/schema_gen/ops.fbs ${BASEPATH}/mindspore/lite/schema/
fi
fi
fi
}

build_lite()
{
rm -rf ${BASEPATH}/output/*
get_version
echo "============ Start building MindSpore Lite ${VERSION_STR} ============"
local LOCAL_LITE_PLATFORM=${LITE_PLATFORM}
local LOCAL_INC_BUILD=${INC_BUILD}
local LOCAL_LITE_ENABLE_GPU=${LITE_ENABLE_GPU}
local LOCAL_LITE_ENABLE_NPU=${ENABLE_NPU}

if [[ "${LITE_LANGUAGE}" == "java" ]]; then
if [[ "X$1" != "X" ]]; then
LOCAL_LITE_PLATFORM=$1

@ -573,13 +562,23 @@ build_lite()
else
LOCAL_LITE_ENABLE_GPU=""
fi
mkdir -p ${BASEPATH}/mindspore/lite/build/java
cd ${BASEPATH}/mindspore/lite/build/
find . -maxdepth 1 | grep -v java | grep '/' | xargs -I {} rm -rf {}
fi
LITE_ENABLE_NPU=${ENABLE_NPU}
if [[ "${LITE_LANGUAGE}" == "cpp" && "${DEVICE}" == "" && "${LOCAL_LITE_PLATFORM}" == "arm64" ]]; then
LOCAL_LITE_ENABLE_GPU="opencl"
LITE_ENABLE_NPU="on"
if [[ "${LITE_LANGUAGE}" == "cpp" ]]; then
if [[ "${DEVICE}" == "" && "${LOCAL_LITE_PLATFORM}" == "arm64" ]]; then
LOCAL_LITE_ENABLE_GPU="opencl"
LOCAL_LITE_ENABLE_NPU="on"
fi

if [[ "${LOCAL_INC_BUILD}" == "off" ]]; then
rm -rf ${BASEPATH}/mindspore/lite/build
fi
mkdir -pv ${BASEPATH}/mindspore/lite/build
fi
if [ "${LITE_ENABLE_NPU}" == "on" ]; then

if [ "${LOCAL_LITE_ENABLE_NPU}" == "on" ]; then
if [ "${LOCAL_LITE_PLATFORM}" == "arm64" ]; then
checkddk
else

@ -588,12 +587,7 @@ build_lite()
fi
fi

cd "${BASEPATH}/mindspore/lite"
if [[ "${LOCAL_INC_BUILD}" == "off" ]]; then
rm -rf build
fi
mkdir -pv build
cd build
cd ${BASEPATH}/mindspore/lite/build
write_commit_file
BUILD_TYPE="Release"
if [[ "${DEBUG_MODE}" == "on" ]]; then

@ -607,7 +601,7 @@ build_lite()
-DANDROID_STL=${ANDROID_STL} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
-DPLATFORM_ARM64=on -DENABLE_NEON=on -DENABLE_FP16="on" \
-DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \
-DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${LITE_ENABLE_NPU} -DENABLE_V0=on \
-DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${LOCAL_LITE_ENABLE_NPU} -DENABLE_V0=on \
-DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
-DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp -DMS_VERSION_MAJOR=${VERSION_MAJOR} \
-DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} -DENABLE_VERBOSE=${ENABLE_VERBOSE} \

@ -619,7 +613,7 @@ build_lite()
-DANDROID_STL=${ANDROID_STL} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DPLATFORM_ARM32=on -DENABLE_NEON=on -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
-DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \
-DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${ENABLE_NPU} -DENABLE_V0=on \
-DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${LOCAL_LITE_ENABLE_NPU} -DENABLE_V0=on \
-DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
-DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp -DMS_VERSION_MAJOR=${VERSION_MAJOR} \
-DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} -DENABLE_VERBOSE=${ENABLE_VERBOSE} \

@ -627,19 +621,22 @@ build_lite()
else
cmake -DPLATFORM_ARM64=off -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
-DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${ENABLE_NPU} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${LOCAL_LITE_ENABLE_GPU} -DSUPPORT_NPU=${LOCAL_LITE_ENABLE_NPU} \
-DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} -DENABLE_V0=on \
-DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp \
-DMS_VERSION_MAJOR=${VERSION_MAJOR} -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} \
-DENABLE_VERBOSE=${ENABLE_VERBOSE} -DX86_64_SIMD=${X86_64_SIMD} "${BASEPATH}/mindspore/lite"
fi
make -j$THREAD_NUM && make install && make package
gen_fbs
if [[ $? -ne 0 ]]; then
echo "---------------- mindspore lite: build failed ----------------"
exit 1
else
mv ${BASEPATH}/output/tmp/*.tar.gz* ${BASEPATH}/output/
if [[ "${LITE_LANGUAGE}" == "cpp" ]]; then
mv ${BASEPATH}/output/tmp/*.tar.gz* ${BASEPATH}/output/
elif [[ "${LITE_LANGUAGE}" == "java" ]]; then
mv ${BASEPATH}/output/tmp/*.tar.gz* ${BASEPATH}/mindspore/lite/build/java
fi
rm -rf ${BASEPATH}/output/tmp/
echo "---------------- mindspore lite: build success ----------------"
if [[ "X$LITE_LANGUAGE" = "Xcpp" ]]; then

@ -654,7 +651,7 @@ build_lite_java_arm64() {
if [[ "X$SUPPORT_TRAIN" = "Xon" ]]; then
JTARBALL=mindspore-lite-${VERSION_STR}-train-android-aarch64
fi
if [[ "X$INC_BUILD" = "Xoff" ]] || [[ ! -f "${BASEPATH}/output/${JTARBALL}.tar.gz" ]]; then
if [[ "X$INC_BUILD" == "Xoff" ]] || [[ ! -f "${BASEPATH}/mindspore/lite/build/java/${JTARBALL}.tar.gz" ]]; then
if [[ "X${DEVICE}" == "Xcpu" ]]; then
build_lite "arm64" "off" ""
elif [[ "X${DEVICE}" == "Xnpu" ]]; then

@ -665,18 +662,18 @@ build_lite_java_arm64() {
fi
fi
# copy arm64 so
cd ${BASEPATH}/output/
cd ${BASEPATH}/mindspore/lite/build/java/
rm -rf ${JTARBALL}
tar -zxvf ${JTARBALL}.tar.gz
[ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/arm64-v8a/
mkdir -p ${JAVA_PATH}/java/app/libs/arm64-v8a/
mkdir -p ${JAVA_PATH}/native/libs/arm64-v8a/
if [[ "X$SUPPORT_TRAIN" = "Xon" ]]; then
cp ${BASEPATH}/output/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/output/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/arm64-v8a/
else
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/arm64-v8a/
fi
[ -n "${VERSION_STR}" ] && rm -rf ${JTARBALL}
}

@ -687,22 +684,22 @@ build_lite_java_arm32() {
if [[ "X$SUPPORT_TRAIN" = "Xon" ]]; then
JTARBALL=mindspore-lite-${VERSION_STR}-train-android-aarch32
fi
if [[ "X$INC_BUILD" = "Xoff" ]] || [[ ! -f "${BASEPATH}/output/${JTARBALL}.tar.gz" ]]; then
if [[ "X$INC_BUILD" == "Xoff" ]] || [[ ! -f "${BASEPATH}/mindspore/lite/build/java/${JTARBALL}.tar.gz" ]]; then
build_lite "arm32" "off" ""
fi
# copy arm32 so
cd ${BASEPATH}/output/
cd ${BASEPATH}/mindspore/lite/build/java/
rm -rf ${JTARBALL}
tar -zxvf ${JTARBALL}.tar.gz
[ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/armeabi-v7a/
mkdir -p ${JAVA_PATH}/java/app/libs/armeabi-v7a/
mkdir -p ${JAVA_PATH}/native/libs/armeabi-v7a/
if [[ "X$SUPPORT_TRAIN" = "Xon" ]]; then
cp ${BASEPATH}/output/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/output/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/train/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/armeabi-v7a/
else
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/armeabi-v7a/
fi
[ -n "${VERSION_STR}" ] && rm -rf ${JTARBALL}
}

@ -710,26 +707,26 @@ build_lite_java_arm32() {
build_lite_java_x86() {
# build mindspore-lite x86
local JTARBALL=mindspore-lite-${VERSION_STR}-inference-linux-x64
if [[ "X$INC_BUILD" = "Xoff" ]] || [[ ! -f "${BASEPATH}/output/${JTARBALL}.tar.gz" ]]; then
if [[ "X$INC_BUILD" == "Xoff" ]] || [[ ! -f "${BASEPATH}/mindspore/lite/build/java/${JTARBALL}.tar.gz" ]]; then
build_lite "x86_64" "off" ""
fi
# copy x86 so
cd ${BASEPATH}/output/
cd ${BASEPATH}/mindspore/lite/build/java
rm -rf ${JTARBALL}
tar -zxvf ${JTARBALL}.tar.gz
[ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/linux_x86/libs/
mkdir -p ${JAVA_PATH}/java/linux_x86/libs/
mkdir -p ${JAVA_PATH}/native/libs/linux_x86/
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/linux_x86/libs/
cp ${BASEPATH}/output/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/linux_x86/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/java/linux_x86/libs/
cp ${BASEPATH}/mindspore/lite/build/java/${JTARBALL}/inference/lib/libmindspore-lite.so ${JAVA_PATH}/native/libs/linux_x86/
}

build_jni_arm64() {
# build jni so
cd "${BASEPATH}/mindspore/lite/build"
rm -rf java
mkdir -pv java
cd java
rm -rf java/jni
mkdir -pv java/jni
cd java/jni
cmake -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \
-DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="arm64-v8a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \
-DMS_VERSION_MAJOR=${VERSION_MAJOR} -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} \

@ -741,17 +738,17 @@ build_jni_arm64() {
exit 1
fi
mkdir -p ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/java/app/libs/arm64-v8a/
mkdir -p ${JAVA_PATH}/native/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/arm64-v8a/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/arm64-v8a/
}

build_jni_arm32() {
# build jni so
cd "${BASEPATH}/mindspore/lite/build"
rm -rf java
mkdir -pv java
cd java
rm -rf java/jni
mkdir -pv java/jni
cd java/jni
cmake -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \
-DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="armeabi-v7a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \
-DMS_VERSION_MAJOR=${VERSION_MAJOR} -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} \

@ -763,17 +760,17 @@ build_jni_arm32() {
exit 1
fi
mkdir -p ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/
mkdir -p ${JAVA_PATH}/native/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/armeabi-v7a/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/armeabi-v7a/
}

build_jni_x86_64() {
# build jni so
cd "${BASEPATH}/mindspore/lite/build"
rm -rf java
mkdir -pv java
cd java
rm -rf java/jni
mkdir -pv java/jni
cd java/jni
cmake -DMS_VERSION_MAJOR=${VERSION_MAJOR} -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} \
-DENABLE_VERBOSE=${ENABLE_VERBOSE} "${JAVA_PATH}/native/"
make -j$THREAD_NUM

@ -782,9 +779,9 @@ build_jni_x86_64() {
exit 1
fi
mkdir -p ${JAVA_PATH}/java/linux_x86/libs/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/java/linux_x86/libs/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/java/linux_x86/libs/
mkdir -p ${JAVA_PATH}/native/libs/linux_x86/
cp ${BASEPATH}/mindspore/lite/build/java/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/linux_x86/
cp ${BASEPATH}/mindspore/lite/build/java/jni/libmindspore-lite-jni.so ${JAVA_PATH}/native/libs/linux_x86/
}

check_java_home() {

@ -799,6 +796,9 @@ check_java_home() {
build_java() {
JAVA_PATH=${BASEPATH}/mindspore/lite/java
get_version
if [[ "X${INC_BUILD}" == "Xoff" ]]; then
rm -rf ${BASEPATH}/mindspore/lite/build
fi
# build common module
cd ${JAVA_PATH}/java/common
gradle clean

@ -820,8 +820,6 @@ build_java() {

cd ${JAVA_PATH}/java/app/build
zip -r mindspore-lite-maven-${VERSION_STR}.zip mindspore
# copy output
cp mindspore-lite-maven-${VERSION_STR}.zip ${BASEPATH}/output/

# build linux x86 jar
check_java_home

@ -838,13 +836,14 @@ build_java() {
mkdir -p ${JAVA_PATH}/java/linux_x86/build/lib
cp ${JAVA_PATH}/java/linux_x86/libs/*.so ${JAVA_PATH}/java/linux_x86/build/lib/jar
cd ${JAVA_PATH}/java/linux_x86/build/
cp -r ${JAVA_PATH}/java/linux_x86/build/lib ${JAVA_PATH}/java/linux_x86/build/mindspore-lite-${VERSION_STR}-inference-linux-x64-jar
mkdir -p ${JAVA_PATH}/java/linux_x86/build/mindspore-lite-${VERSION_STR}-inference-linux-x64-jar
tar czvf mindspore-lite-${VERSION_STR}-inference-linux-x64-jar.tar.gz ./mindspore-lite-${VERSION_STR}-inference-linux-x64-jar
local LINUX_X86_PACKAGE_NAME=mindspore-lite-${VERSION_STR}-inference-linux-x64-jar
cp -r ${JAVA_PATH}/java/linux_x86/build/lib ${JAVA_PATH}/java/linux_x86/build/${LINUX_X86_PACKAGE_NAME}
tar czvf ${LINUX_X86_PACKAGE_NAME}.tar.gz ${LINUX_X86_PACKAGE_NAME}
# copy output
cp mindspore-lite-${VERSION_STR}-inference-linux-x64-jar.tar.gz ${BASEPATH}/output
cp ${JAVA_PATH}/java/app/build/mindspore-lite-maven-${VERSION_STR}.zip ${BASEPATH}/output
cp ${LINUX_X86_PACKAGE_NAME}.tar.gz ${BASEPATH}/output
cd ${BASEPATH}/output
[ -n "${VERSION_STR}" ] && rm -rf mindspore-lite-${VERSION_STR}-inference-linux-x64
[ -n "${VERSION_STR}" ] && rm -rf ${BASEPATH}/mindspore/lite/build/java/mindspore-lite-${VERSION_STR}-inference-linux-x64
exit 0
}

@ -1,4 +1,4 @@
## define customized find fucntions, print customized error messages
## define customized find functions, print customized error messages
function(find_required_package pkg_name)
find_package(${pkg_name})
if(NOT ${pkg_name}_FOUND)

@ -24,7 +24,7 @@ if(Python3_FOUND)
message("Python3 library path: ${Python3_LIBRARY}")
message("Python3 interpreter: ${Python3_EXECUTABLE}")
elseif(Python3_LIBRARY AND Python3_EXECUTABLE AND
${Python3_VERSION} VERSION_GREATER_EQUAL "3.7.0" AND ${Python3_VERSION} VERSION_LESS "3.8.9")
${Python3_VERSION} VERSION_GREATER_EQUAL "3.7.0" AND ${Python3_VERSION} VERSION_LESS "3.9.9")
message(WARNING "Maybe python3 environment is broken.")
message("Python3 library path: ${Python3_LIBRARY}")
message("Python3 interpreter: ${Python3_EXECUTABLE}")

@ -17,7 +17,8 @@ if(NOT TARGET gtest)
set(CMAKE_MACOSX_RPATH TRUE)
set(CMAKE_CXX_FLAGS "${SECURE_CXX_FLAGS}")

if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0" AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64" AND SYSTEM_TYPE MATCHES "euleros")
if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0"
AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64" AND SYSTEM_TYPE MATCHES "euleros")
# -D_GLIBCXX_USE_CXX11_ABI=0 added for the ABI incompatible for libtsdclient.so
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()

@ -86,8 +86,10 @@ function(ms_protobuf_generate_py c_var h_var py_var)
COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file}
COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/" "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py"
COMMAND cp "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/"
COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/"
"${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py"
COMMAND cp "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py"
"${PROJECT_SOURCE_DIR}/mindspore/train/"
DEPENDS protobuf::protoc ${abs_file}
COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM)
endforeach()

@ -17,7 +17,8 @@ function(find_python_package out_inc out_lib)
set(${out_inc} ${inc} PARENT_SCOPE)

execute_process(
COMMAND "${PYTHON_EXECUTABLE}" -c "import distutils.sysconfig as sysconfig; import os; print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))"
COMMAND "${PYTHON_EXECUTABLE}" -c "import distutils.sysconfig as sysconfig; import os; \
print(os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')))"
RESULT_VARIABLE result
OUTPUT_VARIABLE lib)
string(STRIP "${lib}" lib)

@ -63,11 +63,8 @@ function(ms_build_flatbuffers source_schema_files
endif()
endfunction()

function(ms_build_flatbuffers_lite source_schema_files
source_schema_dirs
custom_target_name
generated_output_dir
if_inner)
function(ms_build_flatbuffers_lite
source_schema_files source_schema_dirs custom_target_name generated_output_dir if_inner)

set(total_schema_dirs "")
set(total_generated_files "")

@ -1,5 +1,15 @@
set(glog_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2 ${SECURE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
set(glog_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2 ${SECURE_CXX_FLAGS} -Dgoogle=mindspore_private")
set(glog_CFLAGS "-D_FORTIFY_SOURCE=2 -O2")
if(NOT ENABLE_GLIBCXX)
set(glog_CXXFLAGS "${glog_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
if(BUILD_LITE)
set(glog_patch "")
set(glog_lib glog)
else()
set(glog_patch ${CMAKE_SOURCE_DIR}/third_party/patch/glog/glog.patch001)
set(glog_lib mindspore_glog)
endif()
if(ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/glog/repository/archive/v0.4.0.tar.gz")
set(MD5 "22fe340ddc231e6c8e46bc295320f8ee")

@ -7,11 +17,13 @@ else()
set(REQ_URL "https://github.com/google/glog/archive/v0.4.0.tar.gz")
set(MD5 "0daea8785e6df922d7887755c3d100d0")
endif()

mindspore_add_pkg(glog
VER 0.4.0
LIBS glog
LIBS ${glog_lib}
URL ${REQ_URL}
MD5 ${MD5}
PATCHES ${glog_patch}
CMAKE_OPTION -DBUILD_TESTING=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=ON -DWITH_GFLAGS=OFF)
include_directories(${glog_INC})
add_library(mindspore::glog ALIAS glog::glog)
add_library(mindspore::glog ALIAS glog::${glog_lib})

@ -1,10 +1,16 @@
set(grpc_USE_STATIC_LIBS ON)
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC \
-fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \
-fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
else()
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2")
set(grpc_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \
-fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
if(NOT ENABLE_GLIBCXX)
set(grpc_CXXFLAGS "${grpc_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
endif()

set(grpc_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack")

@ -106,7 +112,8 @@ function(ms_grpc_generate c_var h_var)
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/proto"
COMMAND protobuf::protoc --version
COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/proto
--grpc_out=${CMAKE_BINARY_DIR}/proto --plugin=protoc-gen-grpc=$<TARGET_FILE:grpc::grpc_cpp_plugin> ${abs_file}
--grpc_out=${CMAKE_BINARY_DIR}/proto
--plugin=protoc-gen-grpc=$<TARGET_FILE:grpc::grpc_cpp_plugin> ${abs_file}
DEPENDS protobuf::protoc grpc::grpc_cpp_plugin ${abs_file}
COMMENT "Running C++ gRPC compiler on ${file}" VERBATIM)
endforeach()

@ -114,5 +121,4 @@ function(ms_grpc_generate c_var h_var)
set_source_files_properties(${${c_var}} ${${h_var}} PROPERTIES GENERATED TRUE)
set(${c_var} ${${c_var}} PARENT_SCOPE)
set(${h_var} ${${h_var}} PARENT_SCOPE)

endfunction()

@ -24,7 +24,9 @@ if(BUILD_LITE)
${CMAKE_OPTION})
endif()
else()
set(gtest_CXXFLAGS "${gtest_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
if(NOT ENABLE_GLIBCXX)
set(gtest_CXXFLAGS "${gtest_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
endif()

if(ENABLE_GITEE)

@ -28,7 +28,9 @@ else()
URL ${REQ_URL}
MD5 ${MD5}
PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/icu4c/icu4c.patch01
CONFIGURE_COMMAND ./icu4c/source/runConfigureICU MacOSX --enable-rpath --disable-tests --disable-samples --disable-icuio --disable-extras ICU_DATA_FILTER_FILE=${CMAKE_BINARY_DIR}/icu4c_filter.json
CONFIGURE_COMMAND ./icu4c/source/runConfigureICU MacOSX --enable-rpath --disable-tests
--disable-samples --disable-icuio --disable-extras
ICU_DATA_FILTER_FILE=${CMAKE_BINARY_DIR}/icu4c_filter.json
)
else()
mindspore_add_pkg(icu4c

@ -37,7 +39,9 @@ else()
URL ${REQ_URL}
MD5 ${MD5}
PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/icu4c/icu4c.patch01
CONFIGURE_COMMAND ./icu4c/source/runConfigureICU Linux --enable-rpath --disable-tests --disable-samples --disable-icuio --disable-extras ICU_DATA_FILTER_FILE=${CMAKE_BINARY_DIR}/icu4c_filter.json
CONFIGURE_COMMAND ./icu4c/source/runConfigureICU Linux --enable-rpath --disable-tests --disable-samples
--disable-icuio --disable-extras
ICU_DATA_FILTER_FILE=${CMAKE_BINARY_DIR}/icu4c_filter.json
)
endif()
include_directories(${icu4c_INC})

@ -8,9 +8,11 @@ else()
endif()

if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2")
set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 \
-O2")
else()
set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2")
set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC \
-D_FORTIFY_SOURCE=2 -O2")
endif()

set(jpeg_turbo_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack,-s")

@ -7,7 +7,8 @@ if(ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/libevent/repository/archive/release-2.1.12-stable.tar.gz")
set(MD5 "c9036513dd9e5b4fa1c81ade23b7ead2")
else()
set(REQ_URL "https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz")
set(REQ_URL
"https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz")
set(MD5 "b5333f021f880fe76490d8a799cd79f4")
endif()

@ -39,7 +39,8 @@ function(gene_opencl BASEPATH)
if(NOT RESULT EQUAL "0")
message(FATAL_ERROR "error! when generate ${inc_file_ex}")
endif()
__exec_cmd(COMMAND sed -i "1i\\static const char *${kernel_name}_source =\\\"\\\\n\\\" \\\\" ${inc_file_ex} WORKING_DIRECTORY ${CL_SRC_DIR})
__exec_cmd(COMMAND sed -i "1i\\static const char *${kernel_name}_source =\\\"\\\\n\\\" \\\\"
${inc_file_ex} WORKING_DIRECTORY ${CL_SRC_DIR})
__exec_cmd(COMMAND sed -i "$a\\\\\;" ${inc_file_ex} WORKING_DIRECTORY ${CL_SRC_DIR})
endforeach()
endfunction()

@ -8,74 +8,159 @@ elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -Wno-attributes -Wno-unknown-pragmas")
set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -Wno-unused-value -Wno-implicit-fallthrough")
else()
set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2")
set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2")
set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -O2")
if(NOT ENABLE_GLIBCXX)
set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2")
set(opencv_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack")
endif()

if(ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/opencv/repository/archive/4.2.0.tar.gz")
set(MD5 "00424c7c4acde1e26ebf17aaa155bf23")
if(PYTHON_VERSION MATCHES "3.9")
set(REQ_URL "https://gitee.com/mirrors/opencv/repository/archive/4.5.1.tar.gz")
set(MD5 "e74309207f2fa88fb6cc417d8ea9ff09")
elseif((PYTHON_VERSION MATCHES "3.7") OR (PYTHON_VERSION MATCHES "3.8"))
set(REQ_URL "https://gitee.com/mirrors/opencv/repository/archive/4.2.0.tar.gz")
set(MD5 "00424c7c4acde1e26ebf17aaa155bf23")
else()
message("Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'")
return()
endif()
else()
set(REQ_URL "https://github.com/opencv/opencv/archive/4.2.0.tar.gz")
set(MD5 "e8cb208ce2723481408b604b480183b6")
if(PYTHON_VERSION MATCHES "3.9")
set(REQ_URL "https://github.com/opencv/opencv/archive/4.5.1.tar.gz")
set(MD5 "2205d3169238ec1f184438a96de68513")
elseif((PYTHON_VERSION MATCHES "3.7") OR (PYTHON_VERSION MATCHES "3.8"))
set(REQ_URL "https://github.com/opencv/opencv/archive/4.2.0.tar.gz")
set(MD5 "e8cb208ce2723481408b604b480183b6")
else()
message("Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'")
return()
endif()
endif()

if(WIN32)
mindspore_add_pkg(opencv
VER 4.2.0
LIBS libopencv_core420.dll.a libopencv_imgcodecs420.dll.a libopencv_imgproc420.dll.a
LIB_PATH x64/mingw/lib
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF -DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DBUILD_opencv_videoio=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
if(PYTHON_VERSION MATCHES "3.9")
mindspore_add_pkg(opencv
VER 4.5.1
LIBS libopencv_core451.dll.a libopencv_imgcodecs451.dll.a libopencv_imgproc451.dll.a
LIB_PATH x64/mingw/lib
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF
-DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DBUILD_opencv_videoio=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
elseif(PYTHON_VERSION MATCHES "3.8" OR PYTHON_VERSION MATCHES "3.7")
mindspore_add_pkg(opencv
VER 4.2.0
LIBS libopencv_core420.dll.a libopencv_imgcodecs420.dll.a libopencv_imgproc420.dll.a
LIB_PATH x64/mingw/lib
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF
-DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DBUILD_opencv_videoio=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DWITH_LAPACK=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
endif()
else()
mindspore_add_pkg(opencv
VER 4.2.0
LIBS opencv_core opencv_imgcodecs opencv_imgproc
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF -DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
if(PYTHON_VERSION MATCHES "3.9")
mindspore_add_pkg(opencv
VER 4.5.1
LIBS opencv_core opencv_imgcodecs opencv_imgproc
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF
-DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
elseif(PYTHON_VERSION MATCHES "3.8" OR PYTHON_VERSION MATCHES "3.7")
mindspore_add_pkg(opencv
VER 4.2.0
LIBS opencv_core opencv_imgcodecs opencv_imgproc
URL ${REQ_URL}
MD5 ${MD5}
CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF
-DWITH_ADE=OFF
-DBUILD_ZLIB=ON
-DBUILD_JPEG=ON
-DBUILD_PNG=ON
-DBUILD_OPENEXR=ON
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_opencv_apps=OFF
-DCMAKE_SKIP_RPATH=TRUE
-DBUILD_opencv_python3=OFF
-DWITH_FFMPEG=OFF
-DWITH_TIFF=ON
-DBUILD_TIFF=OFF
-DWITH_JASPER=OFF
-DBUILD_JASPER=OFF
-DWITH_LAPACK=OFF
-DTIFF_INCLUDE_DIR=${tiff_INC}
-DTIFF_LIBRARY=${tiff_LIB})
endif()
endif()

if(WIN32)
include_directories(${opencv_INC})
add_library(mindspore::opencv_core ALIAS opencv::libopencv_core420.dll.a)
add_library(mindspore::opencv_imgcodecs ALIAS opencv::libopencv_imgcodecs420.dll.a)
add_library(mindspore::opencv_imgproc ALIAS opencv::libopencv_imgproc420.dll.a)
if(PYTHON_VERSION MATCHES "3.9")
include_directories(${opencv_INC})
add_library(mindspore::opencv_core ALIAS opencv::libopencv_core451.dll.a)
add_library(mindspore::opencv_imgcodecs ALIAS opencv::libopencv_imgcodecs451.dll.a)
add_library(mindspore::opencv_imgproc ALIAS opencv::libopencv_imgproc451.dll.a)
elseif(PYTHON_VERSION MATCHES "3.8" OR PYTHON_VERSION MATCHES "3.7")
include_directories(${opencv_INC})
add_library(mindspore::opencv_core ALIAS opencv::libopencv_core420.dll.a)
add_library(mindspore::opencv_imgcodecs ALIAS opencv::libopencv_imgcodecs420.dll.a)
add_library(mindspore::opencv_imgproc ALIAS opencv::libopencv_imgproc420.dll.a)
endif()
else()
include_directories(${opencv_INC}/opencv4)
add_library(mindspore::opencv_core ALIAS opencv::opencv_core)

@ -1,5 +1,5 @@
set(projectq_CXXFLAGS "-fopenmp -O2 -ffast-mast -march=native -DINTRIN")
set(projectq_CFLAGS "-fopenmp -O2 -ffast-mast -march=native -DINTRIN")
set(projectq_CXXFLAGS "-fopenmp -O2 -ffast-mast -mavx -DINTRIN")
set(projectq_CFLAGS "-fopenmp -O2 -ffast-mast -mavx -DINTRIN")

if(ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/ProjectQ/repository/archive/v0.5.1.tar.gz")

@ -1,13 +1,20 @@
set(protobuf_USE_STATIC_LIBS ON)
if(BUILD_LITE)
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \
-fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
else()
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC \
-fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \
-fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
else()
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2")
set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter \
-fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2")
if(NOT ENABLE_GLIBCXX)
set(protobuf_CXXFLAGS "${protobuf_CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
endif()
endif()
endif()

@ -69,7 +76,6 @@ function(ms_protobuf_generate c_var h_var)
set_source_files_properties(${${c_var}} ${${h_var}} PROPERTIES GENERATED TRUE)
set(${c_var} ${${c_var}} PARENT_SCOPE)
set(${h_var} ${${h_var}} PARENT_SCOPE)

endfunction()

function(ms_protobuf_generate_py c_var h_var py_var)

@ -100,8 +106,10 @@ function(ms_protobuf_generate_py c_var h_var py_var)
COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND perl -pi.bak -e "s/import (.+_pb2.*)/from . import \\1/" "${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py"
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/"
COMMAND perl -pi.bak -e "s/import (.+_pb2.*)/from . import \\1/"
"${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py"
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py"
"${PROJECT_SOURCE_DIR}/mindspore/train/"
DEPENDS protobuf::protoc ${abs_file}
COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM)
else()

@ -114,7 +122,8 @@ function(ms_protobuf_generate_py c_var h_var py_var)
COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/proto ${abs_file}
COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/" "${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py"
COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/"
"${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py"
COMMAND cp "${CMAKE_BINARY_DIR}/proto/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/"
DEPENDS protobuf::protoc ${abs_file}
COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM)

@ -124,5 +133,4 @@ function(ms_protobuf_generate_py c_var h_var py_var)
set(${c_var} ${${c_var}} PARENT_SCOPE)
set(${h_var} ${${h_var}} PARENT_SCOPE)
set(${py_var} ${${py_var}} PARENT_SCOPE)

endfunction()

@@ -1,36 +1,60 @@
 set(PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR})
 
 if(ENABLE_GITEE)
-    if(PYTHON_VERSION MATCHES "3.8")
+    if(PYTHON_VERSION MATCHES "3.9")
+        set(REQ_URL "https://gitee.com/mirrors/pybind11/repository/archive/v2.6.1.tar.gz")
+        set(MD5 "a9b7642031f35daf33a75fe837b3dd31")
+    elseif(PYTHON_VERSION MATCHES "3.8")
         set(REQ_URL "https://gitee.com/mirrors/pybind11/repository/archive/v2.6.1.tar.gz")
         set(MD5 "a9b7642031f35daf33a75fe837b3dd31")
     elseif(PYTHON_VERSION MATCHES "3.7")
         set(REQ_URL "https://gitee.com/mirrors/pybind11/repository/archive/v2.4.3.tar.gz")
         set(MD5 "b473a37987ce456ea8cc7aab3f9486f9")
     else()
-        message("Could not find 'Python 3.8' or 'Python 3.7'")
+        message("Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'")
         return()
     endif()
 else()
-    if(PYTHON_VERSION MATCHES "3.8")
+    if(PYTHON_VERSION MATCHES "3.9")
+        set(REQ_URL "https://github.com/pybind/pybind11/archive/v2.6.1.tar.gz")
+        set(MD5 "32a7811f3db423df4ebfc731a28e5901")
+    elseif(PYTHON_VERSION MATCHES "3.8")
         set(REQ_URL "https://github.com/pybind/pybind11/archive/v2.6.1.tar.gz")
         set(MD5 "32a7811f3db423df4ebfc731a28e5901")
     elseif(PYTHON_VERSION MATCHES "3.7")
         set(REQ_URL "https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz")
         set(MD5 "62254c40f89925bb894be421fe4cdef2")
     else()
-        message("Could not find 'Python 3.8' or 'Python 3.7'")
+        message("Could not find 'Python 3.8' or 'Python 3.7' or 'Python 3.9'")
         return()
     endif()
 endif()
 set(pybind11_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2")
 set(pybind11_CFLAGS "-D_FORTIFY_SOURCE=2 -O2")
-mindspore_add_pkg(pybind11
+
+if(PYTHON_VERSION MATCHES "3.9")
+    mindspore_add_pkg(pybind11
+            VER 2.6.1
+            URL ${REQ_URL}
+            MD5 ${MD5}
+            CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE
+        )
+elseif(PYTHON_VERSION MATCHES "3.8")
+    mindspore_add_pkg(pybind11
         VER 2.6.1
         URL ${REQ_URL}
         MD5 ${MD5}
         CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE
     )
+else()
+    mindspore_add_pkg(pybind11
+            VER 2.4.3
+            URL ${REQ_URL}
+            MD5 ${MD5}
+            CMAKE_OPTION -DPYBIND11_TEST=OFF -DPYBIND11_LTO_CXX_FLAGS=FALSE
+        )
+endif()
+
 include_directories(${pybind11_INC})
 find_package(pybind11 REQUIRED)
 set_property(TARGET pybind11::module PROPERTY IMPORTED_GLOBAL TRUE)
@@ -8,26 +8,40 @@ endif()
 
 if(WIN32)
-    set(sentencepiece_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wno-unused-result -Wno-stringop-overflow -Wno-format-extra-args -Wno-format")
+    set(sentencepiece_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wno-unused-result -Wno-stringop-overflow \
+        -Wno-format-extra-args -Wno-format")
     set(sentencepiece_CFLAGS "-D_FORTIFY_SOURCE=2 -O2")
     mindspore_add_pkg(sentencepiece
             VER 0.1.92
             LIBS sentencepiece sentencepiece_train
             URL ${REQ_URL}
             CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DSPM_USE_BUILTIN_PROTOBUF=ON
             MD5 ${MD5}
     )
 else()
     set(sentencepiece_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wno-unused-result -Wno-sign-compare")
     set(sentencepiece_CFLAGS "-D_FORTIFY_SOURCE=2 -O2")
-    mindspore_add_pkg(sentencepiece
-            VER 0.1.92
-            LIBS sentencepiece sentencepiece_train
-            URL ${REQ_URL}
-            CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DSPM_USE_BUILTIN_PROTOBUF=OFF -DSPM_ENABLE_SHARED=OFF -DPROTOBUF_INC=${protobuf_INC}
-            MD5 ${MD5}
-    )
+    if(ENABLE_GLIBCXX)
+        mindspore_add_pkg(sentencepiece
+                VER 0.1.92
+                LIBS sentencepiece sentencepiece_train
+                URL ${REQ_URL}
+                CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DSPM_USE_BUILTIN_PROTOBUF=OFF -DSPM_ENABLE_SHARED=OFF
+                -DPROTOBUF_INC=${protobuf_INC}
+                MD5 ${MD5}
+                PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/sentencepiece/sentencepiece.patch001_cpu
+        )
+    else()
+        mindspore_add_pkg(sentencepiece
+                VER 0.1.92
+                LIBS sentencepiece sentencepiece_train
+                URL ${REQ_URL}
+                CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DSPM_USE_BUILTIN_PROTOBUF=OFF -DSPM_ENABLE_SHARED=OFF
+                -DPROTOBUF_INC=${protobuf_INC}
+                MD5 ${MD5}
+                PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/sentencepiece/sentencepiece.patch001
+        )
+    endif()
 endif()
 include_directories(${sentencepiece_INC})
 add_library(mindspore::sentencepiece ALIAS sentencepiece::sentencepiece)
@@ -21,9 +21,11 @@ else()
     set(sqlite_USE_STATIC_LIBS ON)
     set(sqlite_CXXFLAGS)
     if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-        set(sqlite_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2")
+        set(sqlite_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 \
+            -O2")
     else()
-        set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2")
+        set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC \
+            -D_FORTIFY_SOURCE=2 -O2")
         set(sqlite_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack")
     endif()
     mindspore_add_pkg(sqlite
@@ -2,7 +2,8 @@ if(ENABLE_GITEE)
     set(REQ_URL "https://gitee.com/mirrors/incubator-tvm/repository/archive/v0.6.0.tar.gz")
     set(MD5 "7b22965745cf1c6208a4e367fb86a585")
 else()
-    set(REQ_URL "https://github.com/apache/incubator-tvm/release/download/v0.6.0/apache-tvm-src-v0.6.0-incubating.tar.gz")
+    set(REQ_URL
+        "https://github.com/apache/incubator-tvm/release/download/v0.6.0/apache-tvm-src-v0.6.0-incubating.tar.gz")
     set(MD5 "2d77a005f0046d937b99c67de82f6438")
 endif()
 set(incubator_tvm_predict_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2")
@@ -21,6 +21,11 @@ option(ENABLE_DEBUGGER "enable debugger" OFF)
 option(ENABLE_IBVERBS "enable IBVERBS for parameter server" OFF)
 option(ENABLE_PYTHON "Enable python" ON)
 option(ENABLE_ACL "enable acl" OFF)
+option(ENABLE_GLIBCXX "enable_glibcxx" OFF)
+
+if(NOT ENABLE_D AND NOT ENABLE_TESTCASES AND NOT ENABLE_ACL)
+    set(ENABLE_GLIBCXX ON)
+endif()
 
 if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     if(WIN32)

@@ -40,14 +45,10 @@ if(ENABLE_COVERAGE)
 endif()
 
 if(ENABLE_ASAN)
-    if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-        set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address \
-            -fno-omit-frame-pointer -fsanitize=undefined")
-    else()
-        set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer \
-            -static-libsan -fsanitize=undefined")
+    set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address -fno-omit-frame-pointer")
+    if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -static-libsan")
     endif()
+    set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -mcmodel=medium")
 endif()
 
 if(DEBUG_MODE)
@@ -5,6 +5,7 @@ include(GNUInstallDirs)
 # set package information
 set(CPACK_PACKAGE_NAME ${PROJECT_NAME})
 set(CPACK_GENERATOR "External")
+set(CPACK_CMAKE_GENERATOR "Ninja")
 set(CPACK_EXTERNAL_PACKAGE_SCRIPT ${CMAKE_SOURCE_DIR}/cmake/package_script.cmake)
 set(CPACK_EXTERNAL_ENABLE_STAGING true)
 set(CPACK_TEMPORARY_PACKAGE_FILE_NAME ${CMAKE_SOURCE_DIR}/build/package/mindspore)

@@ -76,7 +77,7 @@ install(
 )
 
 if(USE_GLOG)
-    file(GLOB_RECURSE GLOG_LIB_LIST ${glog_LIBPATH}/libglog*)
+    file(GLOB_RECURSE GLOG_LIB_LIST ${glog_LIBPATH}/libmindspore_glog*)
     install(
         FILES ${GLOG_LIB_LIST}
         DESTINATION ${INSTALL_LIB_DIR}
@@ -4,8 +4,6 @@ set(RUNTIME_PKG_NAME ${MAIN_DIR}-${RUNTIME_COMPONENT_NAME})
 
 set(CODEGEN_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/codegen)
 set(CONVERTER_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/converter)
-set(BENCHMARK_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark)
-set(BENCHMARK_TRAIN_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark_train)
 set(CROPPER_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/cropper)
 
 if(SUPPORT_TRAIN)
@@ -15,6 +13,9 @@ if(SUPPORT_TRAIN)
     set(MIND_DATA_INC_DIR ${RUNTIME_PKG_NAME}/train/minddata/include)
     set(MIND_DATA_LIB_DIR ${RUNTIME_PKG_NAME}/train/minddata/lib)
     set(TURBO_DIR ${RUNTIME_PKG_NAME}/train/minddata/third_party/libjpeg-turbo)
+    set(MINDSPORE_LITE_LIB_NAME libmindspore-lite-train)
+    set(BENCHMARK_NAME benchmark_train)
+    set(BENCHMARK_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark_train)
 else()
     set(RUNTIME_DIR ${RUNTIME_PKG_NAME}/inference)
     set(RUNTIME_INC_DIR ${RUNTIME_PKG_NAME}/inference/include)
@@ -22,6 +23,9 @@ else()
     set(MIND_DATA_INC_DIR ${RUNTIME_PKG_NAME}/inference/minddata/include)
     set(MIND_DATA_LIB_DIR ${RUNTIME_PKG_NAME}/inference/minddata/lib)
     set(TURBO_DIR ${RUNTIME_PKG_NAME}/inference/minddata/third_party/libjpeg-turbo)
+    set(MINDSPORE_LITE_LIB_NAME libmindspore-lite)
+    set(BENCHMARK_NAME benchmark)
+    set(BENCHMARK_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark)
 endif()
 
 if(BUILD_MINDDATA STREQUAL "full")
@@ -141,22 +145,29 @@ if(PLATFORM_ARM64)
         install(DIRECTORY ${TOP_DIR}/mindspore/lite/include/ DESTINATION ${RUNTIME_INC_DIR}
                 COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "train*" EXCLUDE)
     endif()
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.so DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
             COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend* ops*" EXCLUDE)
     install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/operator_library DESTINATION ${CODEGEN_ROOT_DIR}
-            COMPONENT ${RUNTIME_COMPONENT_NAME})
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
+    file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h)
+    install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(TARGETS wrapper ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
     if(ENABLE_TOOLS)
-        install(TARGETS benchmark RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
-        if(SUPPORT_TRAIN)
-            install(TARGETS benchmark_train RUNTIME DESTINATION ${BENCHMARK_TRAIN_ROOT_DIR}
-                    COMPONENT ${RUNTIME_COMPONENT_NAME})
-        endif()
+        install(TARGETS ${BENCHMARK_NAME} RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
 elseif(PLATFORM_ARM32)
     if(SUPPORT_TRAIN)
@@ -166,22 +177,29 @@ elseif(PLATFORM_ARM32)
         install(DIRECTORY ${TOP_DIR}/mindspore/lite/include/ DESTINATION ${RUNTIME_INC_DIR}
                 COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "train*" EXCLUDE)
     endif()
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.so DESTINATION ${RUNTIME_LIB_DIR}
            COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR}
            COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
            COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
     install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/operator_library DESTINATION ${CODEGEN_ROOT_DIR}
-            COMPONENT ${RUNTIME_COMPONENT_NAME})
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
+    file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h)
+    install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME})
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+    install(TARGETS wrapper ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
     if(ENABLE_TOOLS)
-        install(TARGETS benchmark RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
-        if(SUPPORT_TRAIN)
-            install(TARGETS benchmark_train RUNTIME DESTINATION ${BENCHMARK_TRAIN_ROOT_DIR}
-                    COMPONENT ${RUNTIME_COMPONENT_NAME})
-        endif()
+        install(TARGETS ${BENCHMARK_NAME} RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
 elseif(WIN32)
     get_filename_component(CXX_DIR ${CMAKE_CXX_COMPILER} PATH)
@@ -198,7 +216,7 @@ elseif(WIN32)
         install(TARGETS codegen RUNTIME DESTINATION ${CODEGEN_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
     if(ENABLE_TOOLS)
-        install(TARGETS benchmark RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
+        install(TARGETS ${BENCHMARK_NAME} RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
     install(FILES ${LIB_LIST} DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(DIRECTORY ${flatbuffers_INC} DESTINATION ${RUNTIME_INC_DIR}/third_party/
@@ -213,12 +231,12 @@ elseif(WIN32)
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
-            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
-    install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR}
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
+    install(FILES ${TOP_DIR}/build/mindspore/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.dll.a DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/build/mindspore/src/${MINDSPORE_LITE_LIB_NAME}.dll.a DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.dll DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/build/mindspore/src/${MINDSPORE_LITE_LIB_NAME}.dll DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
 else()
     if(SUPPORT_TRAIN)
@@ -231,10 +249,10 @@ else()
     install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
-            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE)
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR}
+            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.so DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR}
+    install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR}
             COMPONENT ${RUNTIME_COMPONENT_NAME})
     if(ENABLE_CONVERTER)
         install(TARGETS converter_lite RUNTIME DESTINATION ${CONVERTER_ROOT_DIR}/converter
@@ -244,16 +262,32 @@ else()
         install(FILES ${glog_LIBPATH}/libglog.so.0.4.0
                 DESTINATION ${CONVERTER_ROOT_DIR}/third_party/glog/lib RENAME libglog.so.0
                 COMPONENT ${RUNTIME_COMPONENT_NAME})
-        install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/operator_library DESTINATION ${CODEGEN_ROOT_DIR}
+        file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h)
+        install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME})
+        install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(TARGETS wrapper ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
+        set(MICRO_CMSIS_DIR ${CMAKE_BINARY_DIR}/cmsis/CMSIS)
+        install(DIRECTORY ${MICRO_CMSIS_DIR}/Core/Include DESTINATION ${CODEGEN_ROOT_DIR}/third_party/include/CMSIS/Core
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${MICRO_CMSIS_DIR}/DSP/Include DESTINATION ${CODEGEN_ROOT_DIR}/third_party/include/CMSIS/DSP
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(DIRECTORY ${MICRO_CMSIS_DIR}/NN/Include DESTINATION ${CODEGEN_ROOT_DIR}/third_party/include/CMSIS/NN
+                COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
+        install(TARGETS cmsis_nn ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/third_party/lib
+                COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(TARGETS codegen RUNTIME DESTINATION ${CODEGEN_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
     if(ENABLE_TOOLS)
-        install(TARGETS benchmark RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
-        if(SUPPORT_TRAIN)
-            install(TARGETS benchmark_train RUNTIME DESTINATION ${BENCHMARK_TRAIN_ROOT_DIR}
-                    COMPONENT ${RUNTIME_COMPONENT_NAME})
-        endif()
+        install(TARGETS ${BENCHMARK_NAME} RUNTIME DESTINATION ${BENCHMARK_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(TARGETS cropper RUNTIME DESTINATION ${CROPPER_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(FILES ${TOP_DIR}/mindspore/lite/build/tools/cropper/cropper_mapping_cpu.cfg
                 DESTINATION ${CROPPER_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
@@ -1,5 +1,5 @@
 # find exec
-find_package(Python3 3.7 COMPONENTS Interpreter)
+find_package(Python3 COMPONENTS Interpreter)
 if(NOT Python3_FOUND)
     message(FATAL_ERROR "No python3 found.")
 endif()
@@ -7,8 +7,8 @@ endif()
 set(PYTHON ${Python3_EXECUTABLE})
 set(PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR})
 
-if(NOT (PYTHON_VERSION MATCHES "3.8" OR PYTHON_VERSION MATCHES "3.7"))
-    message(FATAL_ERROR "FIND PYTHON VERSION ${PYTHON_VERSION} BUT CAN NOT MATCH PYTHON VERSION 3.8 OR 3.7")
+if(NOT (PYTHON_VERSION MATCHES "3.9" OR PYTHON_VERSION MATCHES "3.8" OR PYTHON_VERSION MATCHES "3.7"))
+    message(FATAL_ERROR "FIND PYTHON VERSION ${PYTHON_VERSION} BUT CAN NOT MATCH PYTHON VERSION 3.9 OR 3.8 OR 3.7")
 endif()
 
 find_package(Git)
@@ -24,32 +24,38 @@ set(MS_PACK_ROOT_DIR ${MS_ROOT_DIR}/build/package)
 
 # set package file name
 if(CMAKE_SYSTEM_NAME MATCHES "Linux")
-    if(PYTHON_VERSION MATCHES "3.8")
+    if(PYTHON_VERSION MATCHES "3.9")
+        set(PY_TAGS "cp39-cp39")
+    elseif(PYTHON_VERSION MATCHES "3.8")
         set(PY_TAGS "cp38-cp38")
     elseif(PYTHON_VERSION MATCHES "3.7")
         set(PY_TAGS "cp37-cp37m")
     else()
-        message("Could not find 'Python 3.8' or 'Python 3.7'")
+        message("Could not find 'Python 3.9' OR 'Python 3.8' or 'Python 3.7'")
         return()
     endif()
     string(TOLOWER linux_${CMAKE_HOST_SYSTEM_PROCESSOR} PLATFORM_TAG)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
-    if(PYTHON_VERSION MATCHES "3.8")
+    if(PYTHON_VERSION MATCHES "3.9")
+        set(PY_TAGS "py39-none")
+    elseif(PYTHON_VERSION MATCHES "3.8")
         set(PY_TAGS "py38-none")
     elseif(PYTHON_VERSION MATCHES "3.7")
         set(PY_TAGS "py37-none")
     else()
-        message("Could not find 'Python 3.8' or 'Python 3.7'")
+        message("Could not find 'Python 3.9' OR 'Python 3.8' or 'Python 3.7'")
         return()
     endif()
     set(PLATFORM_TAG "any")
 elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
-    if(PYTHON_VERSION MATCHES "3.8")
+    if(PYTHON_VERSION MATCHES "3.9")
+        set(PY_TAGS "cp39-cp39")
+    elseif(PYTHON_VERSION MATCHES "3.8")
         set(PY_TAGS "cp38-cp38")
     elseif(PYTHON_VERSION MATCHES "3.7")
         set(PY_TAGS "cp37-cp37m")
     else()
-        message("Could not find 'Python 3.8' or 'Python 3.7'")
+        message("Could not find 'Python 3.9' OR 'Python 3.8' or 'Python 3.7'")
         return()
     endif()
     set(PLATFORM_TAG "win_amd64")
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
-Subproject commit 40e5c42a12c4daa1530e8db9d006d5b3be5b378f
+Subproject commit 8770bfcdd73777207d562597e21c63179af598f2
@@ -103,8 +103,9 @@ class MS_API GraphCell final : public Cell<GraphCell> {
   std::vector<MSTensor> GetOutputs();
 
  private:
   friend class Model;
   friend class ModelImpl;
-  Status Load();
+  Status Load(uint32_t device_id);
 
   std::shared_ptr<Graph> graph_;
   std::shared_ptr<GraphImpl> executor_;
@@ -24,162 +24,219 @@
 #include "include/api/dual_abi_helper.h"
 
 namespace mindspore {
-constexpr auto kDeviceTypeAscend310 = "Ascend310";
-constexpr auto kDeviceTypeAscend910 = "Ascend910";
-constexpr auto kDeviceTypeGPU = "GPU";
+enum DeviceType {
+  kCPU = 0,
+  kMaliGPU,
+  kNvidiaGPU,
+  kKirinNPU,
+  kAscend910,
+  kAscend310,
+  // add new type here
+  kInvalidDeviceType = 100,
+};
 
-struct MS_API Context {
+class Allocator;
+class DeviceInfoContext;
+
+class MS_API Context {
  public:
   Context();
-  virtual ~Context() = default;
+  ~Context() = default;
+
+  void SetThreadNum(int32_t thread_num);
+  int32_t GetThreadNum() const;
+
+  void SetAllocator(const std::shared_ptr<Allocator> &allocator);
+  std::shared_ptr<Allocator> GetAllocator() const;
+
+  std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();
 
  private:
   struct Data;
-  std::shared_ptr<Data> data;
+  std::shared_ptr<Data> data_;
 };
 
-struct MS_API GlobalContext : public Context {
+class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
  public:
-  static std::shared_ptr<Context> GetGlobalContext();
-
-  static inline void SetGlobalDeviceTarget(const std::string &device_target);
-  static inline std::string GetGlobalDeviceTarget();
-
-  static void SetGlobalDeviceID(const uint32_t &device_id);
-  static uint32_t GetGlobalDeviceID();
-
-  static inline void SetGlobalDumpConfigPath(const std::string &cfg_path);
-  static inline std::string GetGlobalDumpConfigPath();
-
- private:
-  // api without std::string
-  static void SetGlobalDeviceTarget(const std::vector<char> &device_target);
-  static std::vector<char> GetGlobalDeviceTargetChar();
-
-  static void SetGlobalDumpConfigPath(const std::vector<char> &cfg_path);
-  static std::vector<char> GetGlobalDumpConfigPathChar();
-};
+  struct Data;
+
+  DeviceInfoContext();
+  virtual ~DeviceInfoContext() = default;
+  virtual enum DeviceType GetDeviceType() const = 0;
+
+  template <class T>
+  std::shared_ptr<T> Cast() {
+    static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
+    if (GetDeviceType() != T().GetDeviceType()) {
+      return nullptr;
+    }
+
+    return std::static_pointer_cast<T>(shared_from_this());
+  }
+
+ protected:
+  std::shared_ptr<Data> data_;
+};
+
+class MS_API CPUDeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };
+
+  /// \brief Set the thread affinity to CPU cores.
+  ///
+  /// \param mode: 0: no affinities, 1: big cores first, 2: little cores first
+  void SetThreadAffinity(int mode);
+  int GetThreadAffinity() const;
+  void SetEnableFP16(bool is_fp16);
+  bool GetEnableFP16() const;
+};
+
+class MS_API MaliGPUDeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kMaliGPU; };
+
+  void SetEnableFP16(bool is_fp16);
+  bool GetEnableFP16() const;
+};
+
+class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };
+
+  void SetFrequency(int frequency);
+  int GetFrequency() const;
+};
+
+class MS_API NvidiaGPUDeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kNvidiaGPU; };
+
+  void SetDeviceID(uint32_t device_id);
+  uint32_t GetDeviceID() const;
+
+  void SetGpuTrtInferMode(bool gpu_trt_infer_mode);
+  bool GetGpuTrtInferMode() const;
+};
+
+class MS_API Ascend910DeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; };
+
+  void SetDeviceID(uint32_t device_id);
+  uint32_t GetDeviceID() const;
+};
+
+class MS_API Ascend310DeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; };
+
+  void SetDeviceID(uint32_t device_id);
+  uint32_t GetDeviceID() const;
+
+  inline void SetDumpConfigPath(const std::string &cfg_path);
+  inline std::string GetDumpConfigPath() const;
+
+  // aipp config file
+  inline void SetInsertOpConfigPath(const std::string &cfg_path);
+  inline std::string GetInsertOpConfigPath() const;
+
+  // nchw or nhwc
+  inline void SetInputFormat(const std::string &format);
+  inline std::string GetInputFormat() const;
+
+  // Mandatory while dynamic batch: e.g. "input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1"
+  inline void SetInputShape(const std::string &shape);
+  inline std::string GetInputShape() const;
+
+  void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
+  std::map<int, std::vector<int>> GetInputShapeMap() const;
+
+  void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
+  inline std::string GetDynamicBatchSize() const;
+
+  // FP32, UINT8 or FP16, default as FP32
+  void SetOutputType(enum DataType output_type);
+  enum DataType GetOutputType() const;
+
+  // "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16"
+  inline void SetPrecisionMode(const std::string &precision_mode);
+  inline std::string GetPrecisionMode() const;
+
+  // Optional "high_performance" and "high_precision", "high_performance" is set as default
+  inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
+  inline std::string GetOpSelectImplMode() const;
+
+  inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
+  inline std::string GetFusionSwitchConfigPath() const;
+
+  // Optional "l1_optimize", "l2_optimize", "off_optimize" or "l1_and_l2_optimize", default as "l2_optimize"
+  inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode);
+  inline std::string GetBufferOptimizeMode() const;
+
+ private:
+  // api without std::string
+  void SetDumpConfigPath(const std::vector<char> &cfg_path);
+  std::vector<char> GetDumpConfigPathChar() const;
+
+  void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
+  std::vector<char> GetInsertOpConfigPathChar() const;
+
+  void SetInputFormat(const std::vector<char> &format);
+  std::vector<char> GetInputFormatChar() const;
+
+  void SetInputShape(const std::vector<char> &shape);
+  std::vector<char> GetInputShapeChar() const;
+
+  std::vector<char> GetDynamicBatchSizeChar() const;
+
+  void SetPrecisionMode(const std::vector<char> &precision_mode);
+  std::vector<char> GetPrecisionModeChar() const;
+
+  void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
+  std::vector<char> GetOpSelectImplModeChar() const;
+
+  void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
+  std::vector<char> GetFusionSwitchConfigPathChar() const;
+
+  void SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode);
+  std::vector<char> GetBufferOptimizeModeChar() const;
+};
 
-struct MS_API ModelContext : public Context {
- public:
-  static inline void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
-  static inline std::string GetInsertOpConfigPath(const std::shared_ptr<Context> &context);
-
-  static inline void SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format);
-  static inline std::string GetInputFormat(const std::shared_ptr<Context> &context);
-
-  static inline void SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape);
-  static inline std::string GetInputShape(const std::shared_ptr<Context> &context);
-
-  static void SetInputShapeMap(const std::shared_ptr<Context> &context, const std::map<int, std::vector<int>> &shape);
-  static std::map<int, std::vector<int>> GetInputShapeMap(const std::shared_ptr<Context> &context);
-
-  static void SetDynamicBatchSize(const std::shared_ptr<Context> &context,
-                                  const std::vector<size_t> &dynamic_batch_size);
-  static inline std::string GetDynamicBatchSize(const std::shared_ptr<Context> &context);
-
-  static void SetOutputType(const std::shared_ptr<Context> &context, enum DataType output_type);
-  static enum DataType GetOutputType(const std::shared_ptr<Context> &context);
-
-  static inline void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode);
-  static inline std::string GetPrecisionMode(const std::shared_ptr<Context> &context);
-
-  static inline void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
-                                         const std::string &op_select_impl_mode);
-  static inline std::string GetOpSelectImplMode(const std::shared_ptr<Context> &context);
-
-  static inline void SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path);
-  static inline std::string GetFusionSwitchConfigPath(const std::shared_ptr<Context> &context);
-
-  static inline void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode);
-  static inline std::string GetGpuTrtInferMode(const std::shared_ptr<Context> &context);
-
- private:
-  // api without std::string
-  static void SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
-  static std::vector<char> GetInsertOpConfigPathChar(const std::shared_ptr<Context> &context);
-
-  static void SetInputFormat(const std::shared_ptr<Context> &context, const std::vector<char> &format);
-  static std::vector<char> GetInputFormatChar(const std::shared_ptr<Context> &context);
-
-  static void SetInputShape(const std::shared_ptr<Context> &context, const std::vector<char> &shape);
-  static std::vector<char> GetInputShapeChar(const std::shared_ptr<Context> &context);
-
-  static void SetPrecisionMode(const std::shared_ptr<Context> &context, const std::vector<char> &precision_mode);
-  static std::vector<char> GetPrecisionModeChar(const std::shared_ptr<Context> &context);
-
-  static void SetOpSelectImplMode(const std::shared_ptr<Context> &context,
-                                  const std::vector<char> &op_select_impl_mode);
-  static std::vector<char> GetOpSelectImplModeChar(const std::shared_ptr<Context> &context);
-
-  static void SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::vector<char> &cfg_path);
-  static std::vector<char> GetFusionSwitchConfigPathChar(const std::shared_ptr<Context> &context);
-
-  static void SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::vector<char> &gpu_trt_infer_mode);
-  static std::vector<char> GetGpuTrtInferModeChar(const std::shared_ptr<Context> &context);
-  static std::vector<char> GetDynamicBatchSizeChar(const std::shared_ptr<Context> &context);
-};
-
-void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) {
-  SetGlobalDeviceTarget(StringToChar(device_target));
-}
-std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); }
-
-void GlobalContext::SetGlobalDumpConfigPath(const std::string &cfg_path) {
-  SetGlobalDumpConfigPath(StringToChar(cfg_path));
-}
-std::string GlobalContext::GetGlobalDumpConfigPath() { return CharToString(GetGlobalDumpConfigPathChar()); }
-
-void ModelContext::SetInsertOpConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
-  SetInsertOpConfigPath(context, StringToChar(cfg_path));
-}
-std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr<Context> &context) {
-  return CharToString(GetInsertOpConfigPathChar(context));
-}
-
-void ModelContext::SetInputFormat(const std::shared_ptr<Context> &context, const std::string &format) {
-  SetInputFormat(context, StringToChar(format));
-}
-std::string ModelContext::GetInputFormat(const std::shared_ptr<Context> &context) {
-  return CharToString(GetInputFormatChar(context));
-}
-
-void ModelContext::SetInputShape(const std::shared_ptr<Context> &context, const std::string &shape) {
-  SetInputShape(context, StringToChar(shape));
-}
-std::string ModelContext::GetInputShape(const std::shared_ptr<Context> &context) {
-  return CharToString(GetInputShapeChar(context));
-}
-
-void ModelContext::SetPrecisionMode(const std::shared_ptr<Context> &context, const std::string &precision_mode) {
-  SetPrecisionMode(context, StringToChar(precision_mode));
-}
-std::string ModelContext::GetPrecisionMode(const std::shared_ptr<Context> &context) {
-  return CharToString(GetPrecisionModeChar(context));
-}
-
-void ModelContext::SetOpSelectImplMode(const std::shared_ptr<Context> &context,
-                                       const std::string &op_select_impl_mode) {
-  SetOpSelectImplMode(context, StringToChar(op_select_impl_mode));
-}
-std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr<Context> &context) {
-  return CharToString(GetOpSelectImplModeChar(context));
-}
-
-void ModelContext::SetFusionSwitchConfigPath(const std::shared_ptr<Context> &context, const std::string &cfg_path) {
-  SetFusionSwitchConfigPath(context, StringToChar(cfg_path));
-}
-std::string ModelContext::GetFusionSwitchConfigPath(const std::shared_ptr<Context> &context) {
-  return CharToString(GetFusionSwitchConfigPathChar(context));
-}
-
-std::string ModelContext::GetDynamicBatchSize(const std::shared_ptr<Context> &context) {
-  return CharToString(GetDynamicBatchSizeChar(context));
-}
-
-void ModelContext::SetGpuTrtInferMode(const std::shared_ptr<Context> &context, const std::string &gpu_trt_infer_mode) {
-  SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode));
-}
-std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr<Context> &context) {
-  return CharToString(GetGpuTrtInferModeChar(context));
-}
+void Ascend310DeviceInfo::SetDumpConfigPath(const std::string &cfg_path) { SetDumpConfigPath(StringToChar(cfg_path)); }
+std::string Ascend310DeviceInfo::GetDumpConfigPath() const { return CharToString(GetDumpConfigPathChar()); }
+
+void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
+  SetInsertOpConfigPath(StringToChar(cfg_path));
+}
+std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }
+
+void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
+std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }
+
+void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
+std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }
+
+std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }
+
+void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
+  SetPrecisionMode(StringToChar(precision_mode));
+}
+std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
+
+void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
+  SetOpSelectImplMode(StringToChar(op_select_impl_mode));
+}
+std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }
+
+void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
+  SetFusionSwitchConfigPath(StringToChar(cfg_path));
+}
+std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const {
+  return CharToString(GetFusionSwitchConfigPathChar());
+}
+
+void Ascend310DeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {
+  SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
+}
+std::string Ascend310DeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
 } // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_CONTEXT_H
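The hunk above retires the string-keyed GlobalContext/ModelContext statics in favor of an instance-based Context holding a list of DeviceInfoContext objects, with device options moved onto concrete subclasses. A minimal usage sketch composed only from the signatures shown here; the precision-mode string comes from the comment in the hunk, and the surrounding program is illustrative:

#include <memory>
#include "include/api/context.h"

int main() {
  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(4);

  // Device-specific options now live on DeviceInfoContext subclasses.
  auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
  ascend310->SetDeviceID(0);
  ascend310->SetPrecisionMode("allow_fp32_to_fp16");
  context->MutableDeviceInfo().push_back(ascend310);

  // Cast<T>() recovers the concrete type from a stored DeviceInfoContext by
  // comparing DeviceType tags and returns nullptr on a mismatch.
  auto info = context->MutableDeviceInfo().front()->Cast<mindspore::Ascend310DeviceInfo>();
  return info != nullptr ? 0 : 1;
}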
@@ -27,6 +27,7 @@ namespace mindspore {
 class MS_API Graph {
  public:
   class GraphData;
+  Graph();
   explicit Graph(const std::shared_ptr<GraphData> &graph_data);
   explicit Graph(std::shared_ptr<GraphData> &&graph_data);
   explicit Graph(std::nullptr_t);

@@ -34,6 +35,7 @@ class MS_API Graph {
 
   enum ModelType ModelType() const;
   bool operator==(std::nullptr_t) const;
+  bool operator!=(std::nullptr_t) const;
 
  private:
   friend class GraphCell;
@@ -1,71 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
-#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
-
-#include <string>
-#include <memory>
-#include <map>
-#include <any>
-#include "include/api/types.h"
-#include "include/lite_types.h"
-
-namespace mindspore {
-namespace lite {
-class Allocator;
-}  // namespace lite
-
-struct MS_API Context {
- public:
-  static void Clear(const std::shared_ptr<Context> &context);
-
-  static void SetAsDefault(const std::shared_ptr<Context> &context);
-
-  static void SetVendorName(const std::shared_ptr<Context> &context, const std::string &name);
-  static std::string GetVendorName(const std::shared_ptr<Context> &context);
-
-  static void SetThreadNum(const std::shared_ptr<Context> &context, int num);
-  static int GetThreadNum(const std::shared_ptr<Context> &context);
-
-  static void SetAllocator(const std::shared_ptr<Context> &context, std::shared_ptr<lite::Allocator> alloc);
-  static std::shared_ptr<lite::Allocator> GetAllocator(const std::shared_ptr<Context> &context);
-
-  static void ConfigCPU(const std::shared_ptr<Context> &context, bool config);
-  static bool IfCPUEnabled(const std::shared_ptr<Context> &context);
-
-  static void ConfigCPUFp16(const std::shared_ptr<Context> &context, bool config);
-  static bool IfCPUFp16Enabled(const std::shared_ptr<Context> &context);
-
-  static void SetCPUBindMode(const std::shared_ptr<Context> &context, lite::CpuBindMode mode);
-  static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr<Context> &context);
-
-  static void ConfigGPU(const std::shared_ptr<Context> &context, bool config);
-  static bool IfGPUEnabled(const std::shared_ptr<Context> &context);
-
-  static void ConfigGPUFp16(const std::shared_ptr<Context> &context, bool config);
-  static bool IfGPUFp16Enabled(const std::shared_ptr<Context> &context);
-
-  static void ConfigNPU(const std::shared_ptr<Context> &context, bool config);
-  static bool IfNPUEnabled(const std::shared_ptr<Context> &context);
-
-  static void SetNPUFrequency(const std::shared_ptr<Context> &context, int freq);
-  static int GetNPUFrequency(const std::shared_ptr<Context> &context);
-
- private:
-  std::map<std::string, std::any> context_;
-};
-}  // namespace mindspore
-#endif  // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H
@@ -24,39 +24,57 @@
 #include "include/api/status.h"
 #include "include/api/types.h"
 #include "include/api/graph.h"
 #include "include/api/context.h"
 #include "include/api/cell.h"
 #include "include/api/dual_abi_helper.h"
 
 namespace mindspore {
 class ModelImpl;
 struct Context;
 
 class MS_API Model {
  public:
-  explicit Model(const std::vector<Output> &network, const std::shared_ptr<Context> &model_context = nullptr);
-  explicit Model(const GraphCell &graph, const std::shared_ptr<Context> &model_context = nullptr);
+  Model();
   ~Model();
   Model(const Model &) = delete;
   void operator=(const Model &) = delete;
 
-  Status Build();
+  Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr);
   Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);
 
   Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);
 
   std::vector<MSTensor> GetInputs();
+  inline MSTensor GetInputByTensorName(const std::string &tensor_name);
+
   std::vector<MSTensor> GetOutputs();
+  inline std::vector<std::string> GetOutputTensorNames();
+  inline MSTensor GetOutputByTensorName(const std::string &tensor_name);
+  inline std::vector<MSTensor> GetOutputsByNodeName(const std::string &tensor_name);
 
-  static inline bool CheckModelSupport(const std::string &device_type, ModelType model_type);
+  static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);
 
  private:
   // api without std::string
-  static bool CheckModelSupport(const std::vector<char> &device_type, ModelType model_type);
+  MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
+  std::vector<std::vector<char>> GetOutputTensorNamesChar();
+  MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
+  std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);
 
   std::shared_ptr<ModelImpl> impl_;
 };
 
-bool Model::CheckModelSupport(const std::string &device_type, ModelType model_type) {
-  return CheckModelSupport(StringToChar(device_type), model_type);
+MSTensor Model::GetInputByTensorName(const std::string &tensor_name) {
+  return GetInputByTensorName(StringToChar(tensor_name));
 }
+
+std::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }
+
+MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {
+  return GetOutputByTensorName(StringToChar(tensor_name));
+}
+
+std::vector<MSTensor> Model::GetOutputsByNodeName(const std::string &tensor_name) {
+  return GetOutputsByNodeName(StringToChar(tensor_name));
+}
 } // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_MODEL_H
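With the graph-taking constructors removed, a Model is now default-constructed and wired up through Build(GraphCell, Context). A hypothetical end-to-end flow under the new signatures; the GraphCell(Graph) constructor and the kSuccess comparison are assumed from include/api/cell.h and include/api/status.h, and the path is a placeholder:

#include <memory>
#include <string>
#include <vector>
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/serialization.h"

mindspore::Status RunOnce(const std::string &mindir_path) {
  // Serialization::Load() fills a Graph out-parameter (see the serialization.h hunk below).
  mindspore::Graph graph;
  auto ret = mindspore::Serialization::Load(mindir_path, mindspore::ModelType::kMindIR, &graph);
  if (ret != mindspore::kSuccess) {
    return ret;
  }

  auto context = std::make_shared<mindspore::Context>();
  context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());

  mindspore::Model model;  // default-construct, then Build
  ret = model.Build(mindspore::GraphCell(graph), context);
  if (ret != mindspore::kSuccess) {
    return ret;
  }

  // In real use the input tensors would be filled with data before Predict.
  std::vector<mindspore::MSTensor> outputs;
  return model.Predict(model.GetInputs(), &outputs);
}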
@@ -29,19 +29,19 @@
 namespace mindspore {
 class MS_API Serialization {
  public:
-  static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type);
-  inline static Graph LoadModel(const std::string &file, ModelType model_type);
+  static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph);
+  inline static Status Load(const std::string &file, ModelType model_type, Graph *graph);
   static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters);
   static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);
   static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);
   static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file);
 
  private:
-  static Graph LoadModel(const std::vector<char> &file, ModelType model_type);
+  static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);
 };
 
-Graph Serialization::LoadModel(const std::string &file, ModelType model_type) {
-  return LoadModel(StringToChar(file), model_type);
+Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph) {
+  return Load(StringToChar(file), model_type, graph);
 }
 } // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_SERIALIZATION_H
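The LoadModel-to-Load rename changes the error-handling contract: a failure now surfaces as a Status instead of an unusable Graph return value. A before/after sketch; identifiers other than those declared above are illustrative:

#include <string>
#include "include/api/serialization.h"

mindspore::Status LoadGraph(const std::string &path, mindspore::Graph *graph) {
  // Old API: Graph g = Serialization::LoadModel(path, kMindIR); discarded the failure reason.
  // New API: the Status return carries it, and the Graph becomes an out-parameter.
  return mindspore::Serialization::Load(path, mindspore::ModelType::kMindIR, graph);
}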
@@ -43,15 +43,19 @@ class MS_API MSTensor {
  public:
   class Impl;
 
-  static inline MSTensor CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
-                                      const void *data, size_t data_len) noexcept;
-  static inline MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
-                                         const void *data, size_t data_len) noexcept;
+  static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
+                                       const void *data, size_t data_len) noexcept;
+  static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
+                                          const void *data, size_t data_len) noexcept;
+  static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str);
+  static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor);
+  static void DestroyTensorPtr(MSTensor *tensor) noexcept;
 
   MSTensor();
   explicit MSTensor(const std::shared_ptr<Impl> &impl);
   inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
                   size_t data_len);
+  explicit MSTensor(std::nullptr_t);
   ~MSTensor();
 
   inline std::string Name() const;

@@ -65,21 +69,24 @@ class MS_API MSTensor {
 
   bool IsDevice() const;
 
-  MSTensor Clone() const;
+  MSTensor *Clone() const;
   bool operator==(std::nullptr_t) const;
+  bool operator!=(std::nullptr_t) const;
 
  private:
   // api without std::string
-  static MSTensor CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
-                               const void *data, size_t data_len) noexcept;
-  static MSTensor CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
-                                  const void *data, size_t data_len) noexcept;
+  static MSTensor *CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
+                                const void *data, size_t data_len) noexcept;
+  static MSTensor *CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
+                                   const void *data, size_t data_len) noexcept;
+  static MSTensor *CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str);
+  static std::vector<std::vector<char>> TensorToStringChars(const MSTensor &tensor);
 
   MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
            size_t data_len);
   std::vector<char> CharName() const;
 
   friend class ModelImpl;
-  explicit MSTensor(std::nullptr_t);
   std::shared_ptr<Impl> impl_;
 };

@@ -103,16 +110,24 @@ class MS_API Buffer {
   std::shared_ptr<Impl> impl_;
 };
 
-MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
-                                const void *data, size_t data_len) noexcept {
+MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
+                                 const void *data, size_t data_len) noexcept {
   return CreateTensor(StringToChar(name), type, shape, data, data_len);
 }
 
-MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
-                                   const void *data, size_t data_len) noexcept {
+MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
+                                    const void *data, size_t data_len) noexcept {
   return CreateRefTensor(StringToChar(name), type, shape, data, data_len);
 }
 
+MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector<std::string> &str) {
+  return CharStringsToTensor(StringToChar(name), VectorStringToChar(str));
+}
+
+std::vector<std::string> MSTensor::TensorToStrings(const MSTensor &tensor) {
+  return VectorCharToString(TensorToStringChars(tensor));
+}
+
 MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
                    size_t data_len)
     : MSTensor(StringToChar(name), type, shape, data, data_len) {}
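The MSTensor factories move from value returns to heap-allocated pointers paired with the new DestroyTensorPtr(). A small sketch of the resulting ownership pattern, using only the signatures above; the DataType enumerator is assumed from include/api/data_type.h:

#include <vector>
#include "include/api/types.h"

int main() {
  std::vector<float> data{1.0f, 2.0f, 3.0f, 4.0f};
  // CreateTensor() copies the buffer (CreateRefTensor() would alias it) and
  // returns a heap-allocated tensor the caller must release explicitly.
  mindspore::MSTensor *tensor = mindspore::MSTensor::CreateTensor(
      "x", mindspore::DataType::kNumberTypeFloat32, {2, 2}, data.data(), data.size() * sizeof(float));
  if (tensor == nullptr) {
    return 1;
  }
  mindspore::MSTensor *copy = tensor->Clone();  // Clone() is pointer-returning now, too
  mindspore::MSTensor::DestroyTensorPtr(copy);
  mindspore::MSTensor::DestroyTensorPtr(tensor);
  return 0;
}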
@@ -1,134 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INFERENCE_LOG_H_
#define MINDSPORE_INFERENCE_LOG_H_

#include <stdarg.h>
#include <stdint.h>
#include <string>
#include <sstream>
#include <memory>
#include <iostream>
#include <chrono>
#include <vector>

#ifndef ENABLE_ACL
#include "mindspore/core/utils/log_adapter.h"
#else // ENABLE_ACL
#include "acl/acl.h"
#endif

namespace mindspore::inference {

class LogStream {
 public:
  LogStream() { sstream_ = std::make_shared<std::stringstream>(); }
  ~LogStream() = default;

  template <typename T>
  LogStream &operator<<(const T &val) noexcept {
    (*sstream_) << val;
    return *this;
  }

  template <typename T>
  LogStream &operator<<(const std::vector<T> &val) noexcept {
    (*sstream_) << "[";
    for (size_t i = 0; i < val.size(); i++) {
      (*this) << val[i];
      if (i + 1 < val.size()) {
        (*sstream_) << ", ";
      }
    }
    (*sstream_) << "]";
    return *this;
  }

  LogStream &operator<<(std::ostream &func(std::ostream &os)) noexcept {
    (*sstream_) << func;
    return *this;
  }

  friend class LogWriter;
  friend class Status;

 private:
  std::shared_ptr<std::stringstream> sstream_;
};

#ifndef ENABLE_ACL
#define MSI_LOG(level) MS_LOG(level)

#define MSI_LOG_DEBUG MSI_LOG(DEBUG)
#define MSI_LOG_INFO MSI_LOG(INFO)
#define MSI_LOG_WARNING MSI_LOG(WARNING)
#define MSI_LOG_ERROR MSI_LOG(ERROR)

#define MSI_ASSERT(item) MS_ASSERT(item)

#else // ENABLE_ACL

class LogWriter {
 public:
  LogWriter(const char *file, int line, const char *func, aclLogLevel log_level)
      : file_(file), line_(line), func_(func), log_level_(log_level) {}
  ~LogWriter() = default;

  void operator<(const LogStream &stream) const noexcept __attribute__((visibility("default"))) {
    std::ostringstream msg;
    msg << stream.sstream_->rdbuf();
    OutputLog(msg);
  }

 private:
  void OutputLog(const std::ostringstream &msg) const { aclAppLog(log_level_, func_, file_, line_, msg.str().c_str()); }

  const char *file_;
  int line_;
  const char *func_;
  aclLogLevel log_level_;
};

#define MSILOG_IF(level) inference::LogWriter(__FILE__, __LINE__, __FUNCTION__, ACL_##level) < inference::LogStream()

#define MSI_LOG(level) MSI_LOG_##level

#define MSI_LOG_DEBUG MSILOG_IF(DEBUG)
#define MSI_LOG_INFO MSILOG_IF(INFO)
#define MSI_LOG_WARNING MSILOG_IF(WARNING)
#define MSI_LOG_ERROR MSILOG_IF(ERROR)

#define MSI_ASSERT(item)

#endif // ENABLE_ACL

#define MSI_TIME_STAMP_START(name) auto time_start_##name = std::chrono::steady_clock::now();
#define MSI_TIME_STAMP_END(name) \
  { \
    auto time_end_##name = std::chrono::steady_clock::now(); \
    auto time_cost = std::chrono::duration<double, std::milli>(time_end_##name - time_start_##name).count(); \
    MSI_LOG_INFO << #name " Time Cost # " << time_cost << " ms ---------------------"; \
  }

#define INFER_STATUS(code) inference::Status(code) < inference::LogStream()
#define ERROR_INFER_STATUS(status, type, msg) \
  MSI_LOG_ERROR << msg; \
  status = inference::Status(type, msg)

} // namespace mindspore::inference

#endif // MINDSPORE_INFERENCE_LOG_H_
@@ -1,217 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INCLUDE_INFER_TENSOR_H_
#define MINDSPORE_INCLUDE_INFER_TENSOR_H_

#include <utility>
#include <vector>
#include <memory>
#include <numeric>
#include <map>
#include <functional>

#include "securec/include/securec.h"
#include "include/infer_log.h"

namespace mindspore {
#define MS_API __attribute__((visibility("default")))
namespace inference {
enum DataType {
  kMSI_Unknown = 0,
  kMSI_Bool = 1,
  kMSI_Int8 = 2,
  kMSI_Int16 = 3,
  kMSI_Int32 = 4,
  kMSI_Int64 = 5,
  kMSI_Uint8 = 6,
  kMSI_Uint16 = 7,
  kMSI_Uint32 = 8,
  kMSI_Uint64 = 9,
  kMSI_Float16 = 10,
  kMSI_Float32 = 11,
  kMSI_Float64 = 12,
};

class InferTensorBase {
 public:
  InferTensorBase() = default;
  virtual ~InferTensorBase() = default;

  virtual DataType data_type() const = 0;
  virtual void set_data_type(DataType type) = 0;
  virtual std::vector<int64_t> shape() const = 0;
  virtual void set_shape(const std::vector<int64_t> &shape) = 0;
  virtual const void *data() const = 0;
  virtual size_t data_size() const = 0;
  virtual bool resize_data(size_t data_len) = 0;
  virtual void *mutable_data() = 0;

  bool set_data(const void *data, size_t data_len) {
    resize_data(data_len);
    if (mutable_data() == nullptr) {
      MSI_LOG_ERROR << "set data failed, data len " << data_len;
      return false;
    }
    if (data_size() != data_len) {
      MSI_LOG_ERROR << "set data failed, tensor current data size " << data_size() << " not match data len "
                    << data_len;
      return false;
    }
    if (data_len == 0) {
      return true;
    }
    auto ret = memcpy_s(mutable_data(), data_size(), data, data_len);
    if (ret != 0) {
      MSI_LOG_ERROR << "Set data memcpy_s failed";
      return false;
    }
    return true;
  }

  int64_t ElementNum() const {
    std::vector<int64_t> shapex = shape();
    return std::accumulate(shapex.begin(), shapex.end(), 1LL, std::multiplies<int64_t>());
  }

  int GetTypeSize(DataType type) const {
    const std::map<DataType, size_t> type_size_map{
      {kMSI_Bool, sizeof(bool)}, {kMSI_Float64, sizeof(double)}, {kMSI_Int8, sizeof(int8_t)},
      {kMSI_Uint8, sizeof(uint8_t)}, {kMSI_Int16, sizeof(int16_t)}, {kMSI_Uint16, sizeof(uint16_t)},
      {kMSI_Int32, sizeof(int32_t)}, {kMSI_Uint32, sizeof(uint32_t)}, {kMSI_Int64, sizeof(int64_t)},
      {kMSI_Uint64, sizeof(uint64_t)}, {kMSI_Float16, sizeof(uint16_t)}, {kMSI_Float32, sizeof(float)},
    };
    auto it = type_size_map.find(type);
    if (it != type_size_map.end()) {
      return it->second;
    }
    return 0;
  }
};

class InferTensor : public InferTensorBase {
 public:
  DataType type_;
  std::vector<int64_t> shape_;
  std::vector<uint8_t> data_;

 public:
  InferTensor() = default;
  ~InferTensor() = default;
  InferTensor(DataType type, std::vector<int64_t> shape, const void *data, size_t data_len) {
    set_data_type(type);
    set_shape(shape);
    set_data(data, data_len);
  }

  void set_data_type(DataType type) override { type_ = type; }
  DataType data_type() const override { return type_; }

  void set_shape(const std::vector<int64_t> &shape) override { shape_ = shape; }
  std::vector<int64_t> shape() const override { return shape_; }

  const void *data() const override { return data_.data(); }
  size_t data_size() const override { return data_.size(); }

  bool resize_data(size_t data_len) override {
    data_.resize(data_len);
    return true;
  }
  void *mutable_data() override { return data_.data(); }
};

class InferImagesBase {
 public:
  InferImagesBase() = default;
  virtual ~InferImagesBase() = default;
  virtual size_t batch_size() const = 0;
  virtual bool get(size_t index, const void *&pic_buffer, uint32_t &pic_size) const = 0;
  virtual size_t input_index() const = 0; // the index of images as input in model
};

class RequestBase {
 public:
  RequestBase() = default;
  virtual ~RequestBase() = default;
  virtual size_t size() const = 0;
  virtual const InferTensorBase *operator[](size_t index) const = 0;
};

class ImagesRequestBase {
 public:
  ImagesRequestBase() = default;
  virtual ~ImagesRequestBase() = default;
  virtual size_t size() const = 0;
  virtual const InferImagesBase *operator[](size_t index) const = 0;
};

class ReplyBase {
 public:
  ReplyBase() = default;
  virtual ~ReplyBase() = default;
  virtual size_t size() const = 0;
  virtual InferTensorBase *operator[](size_t index) = 0;
  virtual const InferTensorBase *operator[](size_t index) const = 0;
  virtual InferTensorBase *add() = 0;
  virtual void clear() = 0;
};

class VectorInferTensorWrapReply : public ReplyBase {
 public:
  explicit VectorInferTensorWrapReply(std::vector<InferTensor> &tensor_list) : tensor_list_(tensor_list) {}
  ~VectorInferTensorWrapReply() = default;

  size_t size() const { return tensor_list_.size(); }
  InferTensorBase *operator[](size_t index) {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  const InferTensorBase *operator[](size_t index) const {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  InferTensorBase *add() {
    tensor_list_.push_back(InferTensor());
    return &(tensor_list_.back());
  }
  void clear() { tensor_list_.clear(); }
  std::vector<InferTensor> &tensor_list_;
};

class VectorInferTensorWrapRequest : public RequestBase {
 public:
  explicit VectorInferTensorWrapRequest(const std::vector<InferTensor> &tensor_list) : tensor_list_(tensor_list) {}
  ~VectorInferTensorWrapRequest() = default;

  size_t size() const { return tensor_list_.size(); }
  const InferTensorBase *operator[](size_t index) const {
    if (index >= tensor_list_.size()) {
      MSI_LOG_ERROR << "visit invalid index " << index << " total size " << tensor_list_.size();
      return nullptr;
    }
    return &(tensor_list_[index]);
  }
  const std::vector<InferTensor> &tensor_list_;
};
} // namespace inference
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_INFER_TENSOR_H_
@@ -1,86 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INCLUDE_MS_SESSION_H
#define MINDSPORE_INCLUDE_MS_SESSION_H

#include <memory>
#include <vector>
#include <string>
#include "include/infer_tensor.h"
#include "include/infer_log.h"

namespace mindspore {
namespace inference {
enum StatusCode { SUCCESS = 0, FAILED, INVALID_INPUTS };

class Status {
 public:
  Status() : status_code_(FAILED) {}
  Status(enum StatusCode status_code, const std::string &status_msg = "")
      : status_code_(status_code), status_msg_(status_msg) {}
  ~Status() = default;

  bool IsSuccess() const { return status_code_ == SUCCESS; }
  enum StatusCode StatusCode() const { return status_code_; }
  std::string StatusMessage() const { return status_msg_; }
  bool operator==(const Status &other) const { return status_code_ == other.status_code_; }
  bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; }
  bool operator!=(const Status &other) const { return status_code_ != other.status_code_; }
  bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; }
  operator bool() const = delete;
  Status &operator<(const LogStream &stream) noexcept __attribute__((visibility("default"))) {
    status_msg_ = stream.sstream_->str();
    return *this;
  }

 private:
  enum StatusCode status_code_;
  std::string status_msg_;
};

class MS_API InferSession {
 public:
  InferSession() = default;
  virtual ~InferSession() = default;
  virtual Status InitEnv(const std::string &device_type, uint32_t device_id) = 0;
  virtual Status FinalizeEnv() = 0;
  virtual Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id) = 0;
  virtual Status UnloadModel(uint32_t model_id) = 0;
  // override this method to avoid request/reply data copy
  virtual Status ExecuteModel(uint32_t model_id, const RequestBase &request, ReplyBase &reply) = 0;

  virtual Status ExecuteModel(uint32_t model_id, const std::vector<InferTensor> &inputs,
                              std::vector<InferTensor> &outputs) {
    VectorInferTensorWrapRequest request(inputs);
    VectorInferTensorWrapReply reply(outputs);
    return ExecuteModel(model_id, request, reply);
  }
  // default not support input data preprocess(decode, resize, crop, crop&paste, etc.)
  virtual Status ExecuteModel(uint32_t /*model_id*/,
                              const ImagesRequestBase & /*images_inputs*/, // images for preprocess
                              const RequestBase & /*request*/, ReplyBase & /*reply*/) {
    return FAILED;
  }
  virtual Status GetModelInputsInfo(uint32_t graph_id, std::vector<inference::InferTensor> *tensor_list) const {
    Status status(SUCCESS);
    return status;
  }
  static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
};
} // namespace inference
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_MS_SESSION_H
@@ -15,7 +15,7 @@
""".. MindSpore package."""

from ._check_version import check_version_and_env_config
from . import common, train
from . import common, train, log
from .common import *
from .ops import _op_impl
from .train import *
@@ -46,6 +46,7 @@ class GPUEnvChecker(EnvChecker):

    def __init__(self):
        self.version = ["10.1"]
        self.lib_key_to_lib_name = {'libcu': 'libcuda.so'}
        # env
        self.path = os.getenv("PATH")
        self.ld_lib_path = os.getenv("LD_LIBRARY_PATH")
@@ -131,25 +132,32 @@ class GPUEnvChecker(EnvChecker):
        """Get gpu lib path by ldd command."""
        path_list = []
        current_path = os.path.split(os.path.realpath(__file__))[0]
        ldd_result = subprocess.run(["ldd " + current_path + "/_c_expression*.so* | grep " + lib_name],
                                    timeout=3, text=True, capture_output=True, check=False, shell=True)
        if ldd_result.returncode:
            logger.warning(f"{lib_name} so(need by mndspore-gpu) is not found, please confirm that "
                           f"_c_experssion.so depend on {lib_name}, "
                           f"and _c_expression.so in directory:{current_path}")
        try:
            ldd_result = subprocess.run(["ldd " + current_path + "/_c_expression*.so* | grep " + lib_name],
                                        timeout=10, text=True, capture_output=True, check=False, shell=True)
            if ldd_result.returncode:
                logger.error(f"{self.lib_key_to_lib_name[lib_name]} (need by mindspore-gpu) is not found, please "
                             f"confirm that _c_expression.so is in directory:{current_path} and the correct cuda "
                             "version has been installed, you can refer to the installation "
                             "guidelines: https://www.mindspore.cn/install")
                return path_list
            result = ldd_result.stdout
            for i in result.split('\n'):
                path = i.partition("=>")[2]
                if path.lower().find("not found") > 0:
                    logger.warning(f"Cuda {self.version} version(need by mindspore-gpu) is not found, please confirm "
                                   "that the path of cuda is set to the env LD_LIBRARY_PATH, please refer to the "
                                   "installation guidelines: https://www.mindspore.cn/install")
                    continue
                path = path.partition(lib_name)[0]
                if path:
                    path_list.append(os.path.abspath(path.strip() + "../"))
            return np.unique(path_list)
        except subprocess.TimeoutExpired:
            logger.warning("Failed to check cuda version due to the ldd command timeout, please confirm that "
                           "the correct cuda version has been installed, you can refer to the "
                           "installation guidelines: https://www.mindspore.cn/install")
            return path_list
        result = ldd_result.stdout
        for i in result.split('\n'):
            path = i.partition("=>")[2]
            if path.lower().find("not found") > 0:
                logger.warning(f"Cuda {self.version} version(need by mindspore-gpu) is not found, please confirm "
                               "that the path of cuda is set to the env LD_LIBRARY_PATH, please refer to the "
                               "installation guidelines: https://www.mindspore.cn/install")
                continue
            path = path.partition(lib_name)[0]
            if path:
                path_list.append(os.path.abspath(path.strip() + "../"))
        return np.unique(path_list)

    def _read_version(self, file_path):
        """Get gpu version info in version.txt."""
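
The lookup above slices each ldd output line with str.partition; for reference, a minimal standalone sketch of the same parsing (the sample line and the literal "libcuda.so" are hypothetical stand-ins for the lib_name parameter):

import os

# Hypothetical ldd output row for libcuda:
line = "\tlibcuda.so.1 => /usr/lib/x86_64-linux-gnu/libcuda.so.1 (0x00007f5e)"
path = line.partition("=>")[2]           # text after the arrow
path = path.partition("libcuda.so")[0]   # directory prefix before the lib name
if path:
    # normalize to the parent directory, as the checker does
    print(os.path.abspath(path.strip() + "../"))   # /usr/lib
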
@@ -166,7 +174,7 @@ class AscendEnvChecker(EnvChecker):
    """ascend environment check"""

    def __init__(self):
        self.version = ["1.77.22.0.220"]
        self.version = ["1.77.22.6.220"]
        atlas_nnae_version = "/usr/local/Ascend/nnae/latest/fwkacllib/version.info"
        atlas_toolkit_version = "/usr/local/Ascend/ascend-toolkit/latest/fwkacllib/version.info"
        hisi_fwk_version = "/usr/local/Ascend/fwkacllib/version.info"
@@ -100,7 +100,7 @@ def _check_3d_int_or_tuple(arg_name, arg_value, prim_name, allow_five=False, ret

    def _raise_message(third_one_flag=False, three_input_flag=False):
        if third_one_flag:
            raise ValueError(f"For '{prim_name}' the depth of attr '{arg_name}' should be 1, but got {arg_value[-3]}")
            raise ValueError(f"For '{prim_name}' the depth of attr '{arg_name}' should be 1, but got {ret_value[-3]}")
        if three_input_flag:
            raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of "
                             f"three positive int numbers, but got {arg_value}")
@@ -110,8 +110,6 @@ def _check_3d_int_or_tuple(arg_name, arg_value, prim_name, allow_five=False, ret
    def _get_return_value():
        if isinstance(arg_value, int):
            ret = (1, 1, arg_value, arg_value, arg_value) if ret_five else (arg_value, arg_value, arg_value)
            if third_one:
                ret = (1, 1, 1, arg_value, arg_value) if ret_five else (1, arg_value, arg_value)
        elif len(arg_value) == 3:
            ret = (1, 1, arg_value[0], arg_value[1], arg_value[2]) if ret_five else arg_value
        elif len(arg_value) == 5:
@@ -23,11 +23,40 @@ from mindspore._extends.graph_kernel.model.model import GraphKernelUnsupportedEx

def create_expander(expand_info):
    """Create an expander according to op name"""
    expander_list = {
        "AssignAdd": expanders.AssignAdd,
        "BiasAdd": expanders.BiasAdd,
        "BiasAddGrad": expanders.BiasAddGrad,
        "ClipByNormNoDivSum": expanders.ClipByNormNoDivSum,
        "DropoutGrad": expanders.DropoutGrad,
        "FusedAdam": expanders.FusedAdam,
        "FusedAdamWeightDecay": expanders.FusedAdamWeightDecay,
        "GeLU": expanders.GeLU,
        "GeLUGrad": expanders.GeLUGrad,
        "GkDropout": expanders.GkDropout,
        "LayerNorm": expanders.LayerNorm,
        "LayerNormGrad": expanders.LayerNormGrad,
        "LogSoftmax": expanders.LogSoftmax,
        "LogSoftmaxGrad": expanders.LogSoftmaxGrad,
        "MaximumGrad": expanders.MaximumGrad,
        "MinimumGrad": expanders.MinimumGrad,
        "ReduceMean": expanders.ReduceMean,
        "Softmax": expanders.Softmax,
        "Sigmoid": expanders.Sigmoid,
        "SigmoidGrad": expanders.SigmoidGrad,
        "SigmoidCrossEntropyWithLogits": expanders.SigmoidCrossEntropyWithLogits,
        "SigmoidCrossEntropyWithLogitsGrad": expanders.SigmoidCrossEntropyWithLogitsGrad,
        "SoftmaxCrossEntropyWithLogits": expanders.SoftmaxCrossEntropyWithLogits,
        "SqrtGrad": expanders.SqrtGrad,
        "Square": expanders.Square,
        "TanhGrad": expanders.TanhGrad,
        "Tile": expanders.Tile,
        "LambApplyOptimizerAssign": expanders.LambApplyOptimizerAssign,
    }
    op_name = str(expand_info['name'])
    if not hasattr(expanders, op_name):
    if op_name not in expander_list:
        raise GraphKernelUnsupportedException("Generator do not support op: {}".format(op_name))
    expander = getattr(expanders, op_name)
    return expander(expand_info)
    return expander_list[op_name](expand_info)


def extract_expand_info(kernel_info):
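
The change above swaps getattr-based reflection for an explicit registry dict, so only listed ops can be expanded; the pattern in isolation (a sketch with stand-in classes, not the MindSpore API):

class GeLU:
    def run(self):
        return "expand GeLU"

EXPANDERS = {"GeLU": GeLU}

def create(op_name):
    if op_name not in EXPANDERS:
        # unknown ops fail fast instead of resolving arbitrary module attributes
        raise ValueError("unsupported op: {}".format(op_name))
    return EXPANDERS[op_name]()

print(create("GeLU").run())  # expand GeLU
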
@@ -14,6 +14,7 @@
# ============================================================================
"""expanders init"""

from .assign_add import AssignAdd
from .bias_add import BiasAdd
from .bias_add_grad import BiasAddGrad
from .clip_by_norm_no_div_sum import ClipByNormNoDivSum
@@ -31,7 +32,13 @@ from .maximum_grad import MaximumGrad
from .minimum_grad import MinimumGrad
from .reduce_mean import ReduceMean
from .softmax import Softmax
from .sigmoid import Sigmoid
from .sigmoid_grad import SigmoidGrad
from .sigmoid_cross_entropy_with_logits import SigmoidCrossEntropyWithLogits
from .sigmoid_cross_entropy_with_logits_grad import SigmoidCrossEntropyWithLogitsGrad
from .softmax_cross_entropy_with_logits import SoftmaxCrossEntropyWithLogits
from .sqrt_grad import SqrtGrad
from .square import Square
from .tanh_grad import TanhGrad
from .tile import Tile
from .lamb_apply_optimizer_assign import LambApplyOptimizerAssign
@@ -66,19 +66,18 @@ class Expander:

class ExpanderInfoValidator:
    """ExpanderInfoValidator is the utility class which defines the validator decorator for expanders"""
    # pylint: disable=W0211
    @staticmethod
    def _add_check_function(cls, func):
    def _add_check_function(kls, func):
        """
        Rewrite the function `_check` in class Expander
        to append the new `func` after the original checks.
        """
        old_check = getattr(cls, "_check")
        old_check = getattr(kls, "_check")

        def new_check(obj):
            old_check(obj)
            func(obj)
        setattr(cls, "_check", new_check)
        setattr(kls, "_check", new_check)

    @staticmethod
    def add_format(*input_format):
@@ -112,7 +111,7 @@ class ExpanderInfoValidator:
        return wrapper

    @staticmethod
    def check_all_formats_same(cls):
    def check_all_formats_same(kls):
        """Check that all formats are the same"""
        def _check_format(obj):
            inp_formats = [inp['format'] for inp in obj.inputs]
@@ -122,10 +121,10 @@ class ExpanderInfoValidator:
                ','.join(inp_formats), obj.name))

        def wrapper(*args, **kargs):
            if not issubclass(cls, Expander):
                raise Exception("{} should be subclass of Expander.".format(cls.__name__))
            ExpanderInfoValidator._add_check_function(cls, _check_format)
            return cls(*args, **kargs)
            if not issubclass(kls, Expander):
                raise Exception("{} should be subclass of Expander.".format(kls.__name__))
            ExpanderInfoValidator._add_check_function(kls, _check_format)
            return kls(*args, **kargs)

        return wrapper
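
Both decorators rely on the same mechanism: fetch the class's current _check, wrap it in a closure that runs an extra validation afterwards, and set it back; a minimal sketch of that chaining (hypothetical names, independent of the Expander hierarchy):

def add_check(kls, extra):
    old_check = getattr(kls, "_check")

    def new_check(obj):
        old_check(obj)   # original checks first
        extra(obj)       # then the appended one
    setattr(kls, "_check", new_check)

class Dummy:
    def _check(self):
        print("base check")

add_check(Dummy, lambda obj: print("format check"))
Dummy()._check()  # prints "base check", then "format check"
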
@@ -0,0 +1,30 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for assign_add"""
from ._utils import Expander, ExpanderInfoValidator as VLD


@VLD.check_all_formats_same
class AssignAdd(Expander):
    """AssignAdd expander"""

    def _expand(self, graph_builder):
        param, x = self.inputs
        next_para = graph_builder.emit('Add', [param, x])

        param_result = graph_builder.emit(
            'InplaceAssign', [param, next_para, next_para], attrs={'fake_output': True})

        return param_result
@@ -23,7 +23,7 @@ class GeLU(Expander):

    def _expand(self, graph_builder):
        # cal formula are:
        # gelu(x) is 0.5 * x * (1.0 + tanh(y))
        # gelu of x is 0.5 * x * (1.0 + tanh(y))
        # y is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)

        input_x = self.inputs[0]
@@ -25,7 +25,7 @@ class GeLUGrad(Expander):

    def _expand(self, graph_builder):
        # cal formula are:
        # gelu_grad(dy, x) is dy * y'
        # gelu_grad of dy and x is dy * y'
        # y' is 0.5 * (1.0 + tanh(tanh_para)) + 0.5 * x * (1.0 - tanh(tanh_para) * tanh(para)) * mul_right
        # tanh_para is sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)
        # mul_right is sqrt(2.0 / pi) * (1 + 3 * 0.044715 * x * x)
@@ -0,0 +1,76 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for LambApplyOptimizerAssign"""
from ._utils import Expander, ExpanderInfoValidator as VLD

@VLD.check_all_formats_same
class LambApplyOptimizerAssign(Expander):
    """LambApplyOptimizerAssign expander"""

    def _expand(self, graph_builder):

        [grad, inputv, inputm, input_param, beta_1, one_minus_beta_1, beta_2, one_minus_beta_2, epsilon, steps,
         do_use_weight, weight_decay_rate] = self.inputs

        # next_v
        square_grad = graph_builder.emit('Mul', [grad, grad])
        mul_3_result = graph_builder.emit('Mul', [square_grad, one_minus_beta_2])
        mul_2_result = graph_builder.emit('Mul', [inputv, beta_2])
        next_v = graph_builder.emit('Add', [mul_2_result, mul_3_result])

        # next_m
        mul_0_result = graph_builder.emit('Mul', [inputm, beta_1])
        mul_1_result = graph_builder.emit('Mul', [grad, one_minus_beta_1])
        next_m = graph_builder.emit('Add', [mul_0_result, mul_1_result])

        shape = next_m.shape
        const_one = graph_builder.value(beta_2.dtype, 1)

        beta_1_tensor = graph_builder.emit('BroadcastTo', [beta_1], attrs={'shape': shape})
        beta_2_tensor = graph_builder.emit('BroadcastTo', [beta_2], attrs={'shape': shape})

        # pow
        beta_1_log = graph_builder.emit('Log', [beta_1_tensor])
        mul_res_1 = graph_builder.emit('Mul', [beta_1_log, steps])
        beta_1_steps = graph_builder.emit('Exp', [mul_res_1])

        neg_beta_1_step = graph_builder.emit('Neg', [beta_1_steps])
        beta1_correction = graph_builder.emit('Add', [neg_beta_1_step, const_one])

        next_m_unbiased = graph_builder.emit('RealDiv', [next_m, beta1_correction])

        # pow
        beta_2_log = graph_builder.emit('Log', [beta_2_tensor])
        mul_res_2 = graph_builder.emit('Mul', [beta_2_log, steps])
        beta_2_steps = graph_builder.emit('Exp', [mul_res_2])

        neg_beta_2_step = graph_builder.emit('Neg', [beta_2_steps])
        beta2_correction = graph_builder.emit('Add', [neg_beta_2_step, const_one])

        next_v_unbiased = graph_builder.emit('RealDiv', [next_v, beta2_correction])
        # update
        sqrt_next_v = graph_builder.emit('Sqrt', [next_v_unbiased])

        add_2_result = graph_builder.emit('Add', [sqrt_next_v, epsilon])
        update = graph_builder.emit('RealDiv', [next_m_unbiased, add_2_result])
        # update do_use_weight_decay
        do_use_weight_mul = graph_builder.emit('Mul', [input_param, weight_decay_rate])
        do_use_weight_decay = graph_builder.emit('Mul', [do_use_weight_mul, do_use_weight])
        update = graph_builder.emit('Add', [do_use_weight_decay, update])

        res = [update, next_v, next_m]

        return res
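
Since the builder emits no Pow primitive here, beta**steps is computed as exp(steps * log(beta)); the identity behind that Log -> Mul -> Exp chain is easy to confirm in NumPy (a sketch, not part of the diff):

import numpy as np

beta = np.float32(0.9)
steps = np.float32(10)
direct = beta ** steps
via_log_exp = np.exp(steps * np.log(beta))   # the emitted Log/Mul/Exp chain
print(np.allclose(direct, via_log_exp))      # True
print(1.0 - via_log_exp)                     # bias-correction term 1 - beta^t
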
@@ -73,7 +73,7 @@ class LayerNormGrad(Expander):
        sum_3_mul = graph_builder.emit('Mul', [const_neg_two, x_sub_mean])
        sum_3 = graph_builder.emit('ReduceSum', [sum_3_mul], attrs={'reduce_axis': norm_axis, 'keep_dims': True})

        # cal dx = dx1 + dx2 + dx3
        # cal dx, which is dx1 + dx2 + dx3
        dx_1 = graph_builder.emit('Mul', [dy_mul_gamma, rsqrt_var_eps])
        sum_1_mul_two = graph_builder.emit('Mul', [sum_1, const_two])
        sum_1_mul_two_tmp = graph_builder.emit('Mul', [sum_1_mul_two, mean_cof])
@@ -0,0 +1,31 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for Sigmoid"""
from ._utils import Expander


class Sigmoid(Expander):
    """Sigmoid expander"""

    def _expand(self, graph_builder):
        input_x = self.inputs[0]
        # Calculate sigmoid(x)
        # sigmoid of x is 1 / (1 + exp(-x))
        const_one = graph_builder.value(input_x.dtype, 1.0)
        neg_x = graph_builder.emit('Neg', [input_x])
        exp_neg_x = graph_builder.emit('Exp', [neg_x])
        add_exp = graph_builder.emit('Add', [const_one, exp_neg_x])
        res = graph_builder.emit('RealDiv', [const_one, add_exp])
        return res
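
For reference, the element-wise chain this expander emits is exactly the logistic function; the same computation in plain NumPy (a sketch):

import numpy as np

x = np.linspace(-5.0, 5.0, 11).astype(np.float32)
sigmoid = 1.0 / (1.0 + np.exp(-x))   # Neg -> Exp -> Add -> RealDiv
print(sigmoid[5])                    # 0.5 at x = 0
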
@@ -0,0 +1,40 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for SigmoidCrossEntropyWithLogits"""
from ._utils import Expander, ExpanderInfoValidator as VLD


@VLD.check_all_formats_same
class SigmoidCrossEntropyWithLogits(Expander):
    """SigmoidCrossEntropyWithLogits expander"""

    def _expand(self, graph_builder):
        logits, label = self.inputs
        # Calculate sigmoid_cross_entropy_with_logits(logits, label)
        # formula is: -(label * log(sigmoid(logits)) + (1 - label) * log(1 - sigmoid(logits)))
        const_one = graph_builder.value(logits.dtype, 1.0)
        neg_x = graph_builder.emit('Neg', [logits])
        exp_neg_x = graph_builder.emit('Exp', [neg_x])
        add_exp = graph_builder.emit('Add', [const_one, exp_neg_x])
        p = graph_builder.emit('RealDiv', [const_one, add_exp])
        one_sub_p = graph_builder.emit('Sub', [const_one, p])
        one_sub_label = graph_builder.emit('Sub', [const_one, label])
        log_p = graph_builder.emit('Log', [p])
        log_one_sub_p = graph_builder.emit('Log', [one_sub_p])
        res_tmp_1 = graph_builder.emit('Mul', [one_sub_label, log_one_sub_p])
        res_tmp_2 = graph_builder.emit('Mul', [label, log_p])
        res_tmp = graph_builder.emit('Add', [res_tmp_1, res_tmp_2])
        res = graph_builder.emit('Neg', [res_tmp])
        return res
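
The emitted graph is the textbook binary cross-entropy on sigmoid probabilities; the same formula in plain NumPy (a sketch with made-up sample values):

import numpy as np

logits = np.array([2.0, -1.0, 0.5], dtype=np.float32)
label = np.array([1.0, 0.0, 1.0], dtype=np.float32)
p = 1.0 / (1.0 + np.exp(-logits))
loss = -(label * np.log(p) + (1.0 - label) * np.log(1.0 - p))
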
@@ -0,0 +1,34 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for SigmoidCrossEntropyWithLogitsGrad"""
from ._utils import Expander, ExpanderInfoValidator as VLD


@VLD.check_all_formats_same
class SigmoidCrossEntropyWithLogitsGrad(Expander):
    """SigmoidCrossEntropyWithLogitsGrad expander"""

    def _expand(self, graph_builder):
        logits, label, dout = self.inputs
        # Calculate sigmoid_cross_entropy_with_logits_grad(logits, label, dout)
        # formula of sigmoid_cross_entropy_with_logits_grad is : (sigmoid(logits) - label) * dout
        const_one = graph_builder.value(logits.dtype, 1.0)
        neg_x = graph_builder.emit('Neg', [logits])
        exp_neg_x = graph_builder.emit('Exp', [neg_x])
        add_exp = graph_builder.emit('Add', [const_one, exp_neg_x])
        sigmoid_res = graph_builder.emit('RealDiv', [const_one, add_exp])
        sigmoid_res_sub_label = graph_builder.emit('Sub', [sigmoid_res, label])
        res = graph_builder.emit('Mul', [sigmoid_res_sub_label, dout])
        return res
@@ -0,0 +1,31 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for SigmoidGrad"""
from ._utils import Expander, ExpanderInfoValidator as VLD


@VLD.check_all_formats_same
class SigmoidGrad(Expander):
    """SigmoidGrad expander"""

    def _expand(self, graph_builder):
        input_y, dy = self.inputs
        # Calculate sigmoid_grad(y, dy)
        # formula of sigmoid_grad is : (1 - y) * y * dy
        const_one = graph_builder.value(input_y.dtype, 1.0)
        one_mins_y = graph_builder.emit('Sub', [const_one, input_y])
        y_mul_dy = graph_builder.emit('Mul', [input_y, dy])
        res = graph_builder.emit('Mul', [one_mins_y, y_mul_dy])
        return res
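
The (1 - y) * y * dy product matches the analytic derivative sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); a quick NumPy check against a finite difference (a sketch):

import numpy as np

x = np.float32(1.5)
y = 1.0 / (1.0 + np.exp(-x))        # forward sigmoid output
dy = np.float32(1.0)
grad = (1.0 - y) * y * dy           # the expander's Sub/Mul/Mul chain
eps = 1e-3                          # central finite difference for comparison
fd = (1 / (1 + np.exp(-(x + eps))) - 1 / (1 + np.exp(-(x - eps)))) / (2 * eps)
print(np.allclose(grad, fd, atol=1e-4))  # True
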
@@ -0,0 +1,40 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for SoftmaxCrossEntropyWithLogits"""
from mindspore._extends.graph_kernel.model.model import DataFormat as DF
from ._utils import Expander, ExpanderInfoValidator as VLD


@VLD.add_format(DF.DEFAULT, DF.DEFAULT)
class SoftmaxCrossEntropyWithLogits(Expander):
    """SoftmaxCrossEntropyWithLogits expander"""

    def _expand(self, graph_builder):
        logits, label = self.inputs
        # Calculate softmax_cross_entropy_with_logits(logits, label)
        # formula of softmax_cross_entropy_with_logits is : -reduce_sum(label * log(softmax(logits)))
        axis = (-1,)
        max_x = graph_builder.emit('ReduceMax', [logits], attrs={'reduce_axis': axis, 'keep_dims': True})
        data_sub = graph_builder.emit('Sub', [logits, max_x])
        data_exp = graph_builder.emit('Exp', [data_sub])
        data_expsum = graph_builder.emit('ReduceSum', [data_exp], attrs={'reduce_axis': axis, 'keep_dims': True})
        data_softmax = graph_builder.emit('RealDiv', [data_exp, data_expsum])
        softmax_log = graph_builder.emit('Log', [data_softmax])
        label_mul_log = graph_builder.emit('Mul', [label, softmax_log])
        tmp_res = data_expsum = graph_builder.emit('ReduceSum', [label_mul_log], attrs={
            'reduce_axis': axis, 'keep_dims': True})
        loss = graph_builder.emit('Neg', [tmp_res])
        dlogits = graph_builder.emit('Sub', [data_softmax, label])
        return loss, dlogits
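
The ReduceMax subtraction above is the usual max-shift for numerical stability; the whole loss and its dlogits output in NumPy (a sketch assuming one-hot labels):

import numpy as np

logits = np.array([[2.0, 1.0, 0.1]], dtype=np.float32)
label = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
shifted = logits - logits.max(axis=-1, keepdims=True)   # stability shift
softmax = np.exp(shifted) / np.exp(shifted).sum(axis=-1, keepdims=True)
loss = -(label * np.log(softmax)).sum(axis=-1, keepdims=True)
dlogits = softmax - label   # gradient w.r.t. logits
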
@@ -21,7 +21,7 @@ class SqrtGrad(Expander):
    """SqrtGrad expander"""

    def _expand(self, graph_builder):
        # sqrt_grad(x, dout) = dout / (2 * x)
        # formula of sqrt_grad is dout / (2 * x)
        x, dout = self.inputs
        const_two = graph_builder.value(x.dtype, 2)
        dividend = graph_builder.emit('Mul', [x, const_two])
@@ -177,7 +177,6 @@ class PrimLib:
        'ReduceMax': Prim(REDUCE),
        'ReduceMin': Prim(REDUCE),
        'MakeTuple': Prim(CONTROL),
        'ControlDepend': Prim(CONTROL),
        'Assign': Prim(ELEMWISE),
        'Tanh': Prim(ELEMWISE),
        'ExpandDims': Prim(RESHAPE),
@@ -18,7 +18,7 @@ import os
import sys
from te.platform.cce_conf import te_set_version
from te.platform.fusion_util import fusion_op
import te
import tbe.common.context.op_info as operator_info
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# pylint: disable=wrong-import-position
from tbe_common import check_kernel_info, get_args, get_built_in_impl_path
@@ -68,6 +68,7 @@ def build_op(build_type, json_str, tune_mode=None):
    check_kernel_info(kernel_info)
    te_set_version(kernel_info["op_info"]["socVersion"])
    op_name = kernel_info['op_info']['name']
    op_type = kernel_info['op_info']['Type']

    try:
        custom_flag = False
@@ -114,17 +115,19 @@ def build_op(build_type, json_str, tune_mode=None):

        # call function
        if is_dynamic_shape:
            # with te.op.dynamic():
            import tbe.common.context.op_context as op_context
            with op_context.OpContext("dynamic"):
                op_info = operator_info.OpInfo(op_type, op_type)
                op_context.get_context().add_op_info(op_info)
                op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name)
                compile_info = op_context.get_context().get_compile_info()
                if tune_mode is not None:
                    return (te.op.get_compile_info()), (inputs_args, outputs_args, attrs_args), op_module_name
                return te.op.get_compile_info()
                    return compile_info, (inputs_args, outputs_args, attrs_args), op_module_name
                return compile_info
        else:
            res = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name)
            if tune_mode is not None:
                return res, (inputs_args, outputs_args, attrs_args), op_module_name
                return None, (inputs_args, outputs_args, attrs_args), op_module_name
            return res

    except Exception as e:
@@ -143,7 +143,6 @@ def single_to_fusion(json_file, tune_mode):
        "l1_size": -1,
        "op_list": ops
    }
    # op_info = {"fusion_op": end_file}
    res = json.dumps(end_file, ensure_ascii=False)
    return res

@@ -34,6 +34,8 @@ RL_COMPILE = "RL_COMPILE"
RL_OFFLINE = "RL_OFFLINE"
RL_ONLINE = "RL_ONLINE"

COMPILE_TIME_OUT_SECONDS = 600


def create_tbe_parallel_process():
    """
@@ -102,8 +104,8 @@ def run_compiler(op_json):
    """
    try:
        tbe_compiler = os.path.join(os.path.split(os.path.realpath(__file__))[0], "compiler.py")
        completed_object = subprocess.run([sys.executable, tbe_compiler], input=op_json, timeout=300,
                                          text=True, capture_output=True, check=True)
        completed_object = subprocess.run([sys.executable, tbe_compiler], input=op_json,
                                          timeout=COMPILE_TIME_OUT_SECONDS, text=True, capture_output=True, check=True)
        return "Success", completed_object.stderr
    except subprocess.TimeoutExpired:
        tb = traceback.format_exc()
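
The fix replaces the magic 300-second timeout with the shared COMPILE_TIME_OUT_SECONDS constant; the general pattern, runnable in isolation (a sketch with a trivial child command standing in for compiler.py):

import subprocess
import sys

COMPILE_TIME_OUT_SECONDS = 600  # one shared constant instead of scattered literals

try:
    completed = subprocess.run([sys.executable, "-c", "print('compiled')"],
                               input="", timeout=COMPILE_TIME_OUT_SECONDS,
                               text=True, capture_output=True, check=True)
    print("Success", completed.stdout.strip())
except subprocess.TimeoutExpired:
    print("compile timed out")
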
@@ -163,7 +165,7 @@ class TbeProcess:
            res = "TBEException", \
                  "ERROR: [MS_BUILD_PROCESS_NUM] should be in range(1, 25), but got : " + str(process_num)
        elif not process_num.isdigit():
            res = "TBEException", "ERROR: [MS_BUILD_PROCESS_NUM] type should be a int num, but got :" + process_num
            res = "TBEException", "ERROR: [MS_BUILD_PROCESS_NUM] type should be an int num, but got :" + process_num
        return res

    def init_auto_tune_env(self, tune_mode):
@@ -331,6 +333,8 @@ class TbeProcess:

        if tune_mode == RL_TUNE:
            ret, job_type, compile_info = self.__tuner.rl_tune(task_id, op_json)
            if isinstance(compile_info, dict):
                compile_info = json.dumps(compile_info)
            if job_type is RL_OFFLINE or job_type is RL_ONLINE:
                if not ret:
                    # offline and online hit will return false
@@ -361,7 +365,7 @@ class TbeProcess:
        ret = 0, "Failed", "Failed"
        if self.__running_tasks:
            task_id, task_future = self.__running_tasks.pop(0)
            ret_type, result = task_future.get(330)
            ret_type, result = task_future.get(COMPILE_TIME_OUT_SECONDS)
            if ret_type == "Success":
                ret = task_id, "Success", result
            elif ret_type in ("Exception", "TBEException"):
@@ -388,7 +392,7 @@ class TbeProcess:
            for item in ret:
                task_id = item['task_id']
                status_code = item['status_code']
                compile_info = item["op_res"] if "op_res" in item else "{}"
                compile_info = json.dumps(item["op_res"] if "op_res" in item else None)
                res = None
                if status_code == 0:
                    res = task_id, "Success", compile_info
@@ -170,6 +170,23 @@ class TbeTuner:

        return soc_info

    def check_te_log(self, te_log_level):
        """
        Check te log level
        :param te_log_level:
        :return:
        """
        res = True
        if te_log_level.isdigit() and int(te_log_level) >= len(TE_LOG_LEVEL):
            log.error(f"Invalid environment TE_LOGLEVEL, the value should be in [0, 4) if it is a digit, but got : "
                      f"{te_log_level}")
            res = False
        elif te_log_level.upper() not in TE_LOG_LEVEL:
            log.error(f"Invalid environment TE_LOGLEVEL, the value should be one of [DEBUG, INFO, WARNING, ERROR] "
                      f"if it is a string, but got :{te_log_level}")
            res = False
        return res

    def parallel_compilation_init(self, soc_info, tune_mode, process_num):
        """
        Initialize parallel compilation framework for tuner
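
The extracted helper accepts either a digit in [0, 4) or one of the level names; a standalone sketch of the intended validation (the digit branch is guarded explicitly here for clarity, which differs slightly from the elif chain above):

TE_LOG_LEVEL = ["DEBUG", "INFO", "WARNING", "ERROR"]

def is_valid_te_log_level(value):
    if value.isdigit():
        return int(value) < len(TE_LOG_LEVEL)      # "0".."3" are accepted
    return value.upper() in TE_LOG_LEVEL           # case-insensitive level name

print([is_valid_te_log_level(v) for v in ("2", "info", "9", "TRACE")])
# [True, True, False, False]
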
@@ -201,14 +218,7 @@ class TbeTuner:
            os.environ["TE_LOGLEVEL"] = TE_LOG_LEVEL[2]
            global_loglevel = 3
        else:
            # pylint: disable=no-else-return
            if te_log_level.isdigit() and int(te_log_level) >= len(TE_LOG_LEVEL):
                log.error(f"Invalid environment TE_LOGLEVEL, the value should be in [0, 4) if it is a digit, but got : "
                          f"{te_log_level}")
                return False
            elif te_log_level.upper() not in TE_LOG_LEVEL:
                log.error(f"Invalid environment TE_LOGLEVEL, the value should be one of [DEBUG, INFO, WARNING, ERROR] "
                          f"if it is a string, but got :{te_log_level}")
            if not self.check_te_log(te_log_level):
                return False
            global_loglevel = int(te_log_level) if te_log_level.isdigit() else TE_LOG_LEVEL.index(te_log_level.upper())
        ret = init_multi_process_env(embedding, soc_info, tune_mode, global_loglevel, enable_event, pid_ts)
@@ -296,7 +306,7 @@ class TbeTuner:
        # todo build with build_single_op_from_c
        base_kernel = './kernel_meta/' + kernel_name + '.o'
        job_type = RL_COMPILE
        compile_info = "{}"
        compile_info = None
        try:
            compile_info, op_args, op_module_name = build_op(OP_BUILD, json.dumps(json_info), tune_mode)
        # pylint: disable=broad-except
@@ -317,7 +327,7 @@ class TbeTuner:

            self.module_list[op_module_name] = 1
            self.fusion_need_sync += 1
        return ret, job_type, json.dumps(compile_info)
        return ret, job_type, compile_info

    def fusion_rl_tune(self, task_id, json_info):
        """
@@ -334,6 +344,7 @@ class TbeTuner:
        converted_json = fusion_to_fusion(json.dumps(json_info), tune_mode="RL")
        job_type = RL_COMPILE
        base_kernel = './kernel_meta/' + kernel_name + '.o'
        compile_info = None
        try:
            fusion_op(converted_json)
        # pylint: disable=broad-except
@@ -341,7 +352,7 @@ class TbeTuner:
            exc_type, exc_value, _ = sys.exc_info()
            log.error(
                "exc_type:{}, exc_value:{}, exc_traceback:{}".format(exc_type, exc_value, traceback.format_exc()))
            return False, job_type
            return False, job_type, compile_info
        if self.offline_tune:
            job_type = RL_OFFLINE
            dump_fusion_json(converted_json, self.offline_dump_path)
@@ -351,7 +362,7 @@ class TbeTuner:
            l1size = 0
            ret = dispatch_fusion_tune_task(graph_id, task_id, l1size, base_kernel, kernel_name, full_name,
                                            converted_json)
        return ret, job_type
        return ret, job_type, compile_info

    def fusion_ga_tune(self, task_id, json_info):
        """
@@ -21,13 +21,16 @@ from .parser import (Parser, create_obj_instance, generate_scope,
                     get_class_member_namespace_symbol, create_slice_obj,
                     get_dataclass_attributes, get_dataclass_methods, get_obj_id,
                     get_module_namespace, get_obj_type, get_object_key,
                     get_ast_type, get_node_type, get_args, get_args_default_values,
                     get_ast_namespace_symbol, get_operation_namespace_symbol,
                     get_parse_method_of_class, get_scope_name, expand_expr_statement,
                     is_class_member, parse_cb, resolve_symbol, convert_to_ms_tensor, get_object_description)
from .serialize import *

__all__ = ['parse_cb', 'get_parse_method_of_class', 'get_bprop_method_of_class', 'resolve_symbol',
           'get_object_key', 'get_class_instance_type', 'is_class_member',
           'get_obj_type', 'get_obj_id', 'create_obj_instance', 'get_module_namespace',
           'get_object_key', 'get_class_instance_type', 'is_class_member', 'get_ast_type', 'get_node_type',
           'get_args_default_values', 'get_ast_namespace_symbol', 'get_operation_namespace_symbol',
           'get_args', 'get_obj_type', 'get_obj_id', 'create_obj_instance', 'get_module_namespace',
           'get_class_member_namespace_symbol', 'get_obj_id', 'Parser', 'get_dataclass_attributes',
           'get_dataclass_methods', 'dump_obj', 'load_obj', 'get_dataclass_methods', 'get_scope_name',
           'create_slice_obj', 'convert_to_ms_tensor', 'get_object_description', 'expand_expr_statement']
@@ -371,6 +371,89 @@ def expand_expr_statement(node):
    return (False,)


def get_ast_namespace_symbol(obj):
    """Get obj type and namespace and symbol."""
    # step 1:get symbol from object map
    ops_info = parse_object_map.get(type(obj), SYMBOL_UNDEFINE)
    logger.debug("ops info = %r", ops_info)
    return ops_info


def get_operation_namespace_symbol(var: str):
    """Get operation namespace and symbol."""
    ops_info = (trope_ns, var)
    logger.debug("get operation ops info = %r", ops_info)
    return ops_info


def get_ast_type(node):
    """Get the ast type."""
    ast_type = AST_SUB_TYPE_UNKNOWN
    if isinstance(node, ast.And):
        ast_type = AST_SUB_TYPE_AND
    elif isinstance(node, ast.Or):
        ast_type = AST_SUB_TYPE_OR
    elif isinstance(node, ast.Name):
        ast_type = AST_SUB_TYPE_NAME
    elif isinstance(node, ast.Tuple):
        ast_type = AST_SUB_TYPE_TUPLE
    elif isinstance(node, ast.Subscript):
        ast_type = AST_SUB_TYPE_SUBSCRIPT
    elif isinstance(node, ast.Starred):
        ast_type = AST_SUB_TYPE_STARRED
    elif isinstance(node, ast.Attribute):
        ast_type = AST_SUB_TYPE_ATTRIBUTE
    else:
        ast_type = AST_SUB_TYPE_UNKNOWN
    return ast_type


def get_node_type(node):
    """Process an ast node."""
    method_name = f'{node.__class__.__name__}'
    node_type = [method_name]
    # judge the ast main type
    if isinstance(node, ast.stmt):
        node_type.append(AST_MAIN_TYPE_STMT)
    elif isinstance(node, (ast.expr, ast.slice)) or node is None:
        # ast.slice and ast.expr should be expr
        node_type.append(AST_MAIN_TYPE_EXPR)
    else:
        node_type.append(AST_MAIN_TYPE_UNKNOWN)
    return node_type


def get_args_default_values(node):
    """get the args'default values of parse object."""
    nondefaults = [None] * (len(node.args.args) - len(node.args.defaults))
    defaults = nondefaults + node.args.defaults + node.args.kw_defaults
    if node.args.vararg:
        defaults.append(None)
    if node.args.kwarg:
        defaults.append(None)
    return defaults


def get_args(node):
    """Get the arg of parse object."""
    args = []
    # process position args
    for arg in node.args.args:
        args.append(arg)

    # process kwonlyargs: kwonlyargs is append after position args
    if node.args.kwonlyargs:
        for kwarg in node.args.kwonlyargs:
            args.append(kwarg)
    # process vararg: vararg is append after kwonlyargs
    if node.args.vararg:
        args.append(node.args.vararg)
    # process kwarg: kwarg is append after vararg
    if node.args.kwarg:
        args.append(node.args.kwarg)
    return args


class Parser:
    """
    Parser python code to ast tree.
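
The relocated helpers walk the argument lists of an ast.FunctionDef; for example, get_args-style traversal over a parsed function collects positional, keyword-only, *args and **kwargs in that order (a sketch that prints argument names, whereas the real helper returns the ast nodes themselves):

import ast

node = ast.parse("def f(a, b=1, *args, c, **kw): pass").body[0]

names = [a.arg for a in node.args.args]            # ['a', 'b']
names += [a.arg for a in node.args.kwonlyargs]     # ['c']
if node.args.vararg:
    names.append(node.args.vararg.arg)             # 'args'
if node.args.kwarg:
    names.append(node.args.kwarg.arg)              # 'kw'
print(names)  # ['a', 'b', 'c', 'args', 'kw']
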
@ -416,102 +499,28 @@ class Parser:
|
|||
idt_err.filename = self.filename
|
||||
idt_err.lineno = self.line_offset
|
||||
idt_err.msg = f"There are incorrect indentations in definition or comment of function: " \
|
||||
f"'{self.fn.__qualname__}'."
|
||||
f"'{self.fn.__qualname__}'."
|
||||
raise idt_err
|
||||
Parser.ast_cache[hexstr] = tree
|
||||
else:
|
||||
logger.error("Fn type is invalid")
|
||||
return tree
|
||||
|
||||
-    def get_args(self, node):
-        """Get the arg of parse object."""
-        args = []
-        # process position args
-        for arg in node.args.args:
-            args.append(arg)
-
-        # process kwonlyargs: kwonlyargs is append after position args
-        if node.args.kwonlyargs:
-            for kwarg in node.args.kwonlyargs:
-                args.append(kwarg)
-        # process vararg: vararg is append after kwonlyargs
-        if node.args.vararg:
-            args.append(node.args.vararg)
-        # process kwarg: kwarg is append after vararg
-        if node.args.kwarg:
-            args.append(node.args.kwarg)
-        return args
-
-    def get_args_default_values(self, node):
-        """get the args' default values of parse object."""
-        nondefaults = [None] * (len(node.args.args) - len(node.args.defaults))
-        defaults = nondefaults + node.args.defaults + node.args.kw_defaults
-        if node.args.vararg:
-            defaults.append(None)
-        if node.args.kwarg:
-            defaults.append(None)
-        return defaults
-
-    def get_node_type(self, node):
-        """Process an ast node."""
-        method_name = f'{node.__class__.__name__}'
-        node_type = [method_name]
-        # judge the ast main type
-        if isinstance(node, ast.stmt):
-            node_type.append(AST_MAIN_TYPE_STMT)
-        elif isinstance(node, (ast.expr, ast.slice)) or node is None:
-            # ast.slice and ast.expr should be expr
-            node_type.append(AST_MAIN_TYPE_EXPR)
-        else:
-            node_type.append(AST_MAIN_TYPE_UNKNOWN)
-        return node_type
-
-    def get_ast_type(self, node):
-        """Get the ast type."""
-        ast_type = AST_SUB_TYPE_UNKNOWN
-        if isinstance(node, ast.And):
-            ast_type = AST_SUB_TYPE_AND
-        elif isinstance(node, ast.Or):
-            ast_type = AST_SUB_TYPE_OR
-        elif isinstance(node, ast.Name):
-            ast_type = AST_SUB_TYPE_NAME
-        elif isinstance(node, ast.Tuple):
-            ast_type = AST_SUB_TYPE_TUPLE
-        elif isinstance(node, ast.Subscript):
-            ast_type = AST_SUB_TYPE_SUBSCRIPT
-        elif isinstance(node, ast.Starred):
-            ast_type = AST_SUB_TYPE_STARRED
-        elif isinstance(node, ast.Attribute):
-            ast_type = AST_SUB_TYPE_ATTRIBUTE
-        else:
-            ast_type = AST_SUB_TYPE_UNKNOWN
-        return ast_type
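A quick sanity check of the stmt/expr split and the sub-type mapping above, using only the standard ast module (the AST_* names are MindSpore constants; the prints just show the isinstance tests they rely on):

import ast

mod = ast.parse("x = y.z + 1")
assign = mod.body[0]
print(isinstance(assign, ast.stmt))                  # True  -> AST_MAIN_TYPE_STMT
print(isinstance(assign.value, ast.expr))            # True  -> AST_MAIN_TYPE_EXPR
print(isinstance(assign.value.left, ast.Attribute))  # True  -> AST_SUB_TYPE_ATTRIBUTE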

-    def get_namespace_symbol(self, var: str):
-        """Get symbol type and namespace and symbol."""
-        if var in self.closure_namespace:
-            ops_info = (self.closure_namespace, var)
-            logger.debug("in closure_namespace")
-        elif var in self.global_namespace:
-            ops_info = (self.global_namespace, var)
-            logger.debug("in global_namespace")
-        else:
-            ops_info = parse_object_map.get(SYMBOL_UNDEFINE)
-            ops_info = [ops_info[0], var]
-        return ops_info
+    def get_namespace_symbol(self, var: str):
+        """Get symbol type, namespace and symbol."""
+        if var in self.closure_namespace:
+            logger.debug("in closure_namespace")
+            return self.closure_namespace, var
+        if var in self.global_namespace:
+            logger.debug("in global_namespace")
+            value = self.global_namespace[var]
+            if isinstance(value, type(abs)) and self.global_namespace[var] not in convert_object_map:
+                error_info = f"The builtin function '{var}' is not supported in graph mode."
+                return None, var, error_info
+            return self.global_namespace, var
+        error_info = f"The name '{var}' is not defined."
+        return None, var, error_info

    def get_operation_namespace_symbol(self, var: str):
        """Get operation namespace and symbol."""
        ops_info = (trope_ns, var)
        logger.debug("get operation ops info = %r", ops_info)
        return ops_info

    def get_ast_namespace_symbol(self, obj):
        """Get obj type, namespace and symbol."""
        # step 1: get symbol from object map
        ops_info = parse_object_map.get(type(obj), SYMBOL_UNDEFINE)
        logger.debug("ops info = %r", ops_info)
        return ops_info
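The new lookup order is easier to see in isolation: closure namespace first, then globals, with builtins rejected unless they have a graph-mode mapping. A minimal sketch with stand-in namespaces (resolve and the one-entry convert_object_map here are hypothetical):

convert_object_map = {abs: "graph_abs"}  # assumed mapping for illustration

def resolve(var, closure_ns, global_ns):
    if var in closure_ns:
        return closure_ns, var
    if var in global_ns:
        value = global_ns[var]
        if isinstance(value, type(abs)) and value not in convert_object_map:
            return None, var, f"The builtin function '{var}' is not supported in graph mode."
        return global_ns, var
    return None, var, f"The name '{var}' is not defined."

print(resolve("abs", {}, {"abs": abs}))        # supported builtin -> namespace hit
print(resolve("print", {}, {"print": print}))  # unsupported builtin -> error tuple
print(resolve("missing", {}, {}))              # undefined name -> error tuple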

    def analyze_super(self, class_type_node, subclass_instance):
        """Analyze super and return a class instance."""

@@ -214,7 +214,7 @@ set(SUB_COMP
        frontend/operator
        pipeline/jit
        pipeline/pynative
-       common debug pybind_api utils vm profiler ps mindquantum
+       common debug pybind_api utils vm profiler ps
        )

foreach(_comp ${SUB_COMP})

@@ -53,7 +53,7 @@ if(ENABLE_CPU)
    set_property(SOURCE ${QUANTUM_SRC_LIST} PROPERTY COMPILE_DEFINITIONS
                 SUBMODULE_ID=mindspore::SubModuleId::SM_MINDQUANTUM)
    set_property(SOURCE ${QUANTUM_SRC_LIST} PROPERTY COMPILE_DEFINITIONS INTRIN)
-   set_property(SOURCE ${QUANTUM_SRC_LIST} PROPERTY COMPILE_OPTIONS -fopenmp -march=native -ffast-math)
+   set_property(SOURCE ${QUANTUM_SRC_LIST} PROPERTY COMPILE_OPTIONS -fopenmp -mavx -ffast-math)
else()
    message("not compiled quantum kernel_compiler")
    set(QUANTUM_SRC_LIST "")

@@ -102,7 +102,8 @@ bool AkgKernelBuilder::AkgOpParallelBuild(const std::vector<JsonNodePair> &build
    return true;
  }

-  kernel::KernelBuildClient *client = GetClient();
+  auto client = GetClient();
  MS_EXCEPTION_IF_NULL(client);
  if (!client->AkgStart(PROCESS_NUM, TIME_OUT)) {
    MS_LOG(ERROR) << "Akg start failed.";
    return false;

@@ -50,7 +50,6 @@ class AkgKernelBuilder {
  bool AkgOpParallelBuild(const std::vector<JsonNodePair> &build_args);
-  std::vector<JsonNodePair> repeat_nodes_;
};

}  // namespace kernel
}  // namespace mindspore

@@ -76,15 +76,16 @@ void ArithmeticCPUKernel::RealDiv(const T *input1, const T *input2, T *out, size
      GenIndex(i, &idx);
      auto dividend = input1[idx[0]];
      auto divisor = input2[idx[1]];
-     if (divisor == 0) {
-       if (dividend == 0) {
+     auto zero = (T)0;
+     if (divisor == zero) {
+       if (dividend == zero) {
          out[i] = std::numeric_limits<T>::quiet_NaN();
          continue;
        }
        if (std::numeric_limits<T>::has_infinity) {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
+         out[i] = dividend > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
        } else {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
+         out[i] = dividend > zero ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
        }
        continue;
      }

@@ -102,15 +103,16 @@ void ArithmeticCPUKernel::Div(const T *input1, const T *input2, T *out, size_t s
      GenIndex(i, &idx);
      auto dividend = input1[idx[0]];
      auto divisor = input2[idx[1]];
-     if (divisor == 0) {
-       if (dividend == 0) {
+     auto zero = (T)0;
+     if (divisor == zero) {
+       if (dividend == zero) {
          out[i] = std::numeric_limits<T>::quiet_NaN();
          continue;
        }
        if (std::numeric_limits<T>::has_infinity) {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
+         out[i] = dividend > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
        } else {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
+         out[i] = dividend > zero ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
        }
        continue;
      }

@@ -128,19 +130,20 @@ void ArithmeticCPUKernel::FloorDiv(const T *input1, const T *input2, T *out, siz
      GenIndex(i, &idx);
      auto dividend = input1[idx[0]];
      auto divisor = input2[idx[1]];
-     if (divisor == 0) {
-       if (dividend == 0) {
+     auto zero = (T)0;
+     if (divisor == zero) {
+       if (dividend == zero) {
          out[i] = std::numeric_limits<T>::quiet_NaN();
          continue;
        }
        if (std::numeric_limits<T>::has_infinity) {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
+         out[i] = dividend > zero ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
        } else {
-         out[i] = dividend > 0 ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
+         out[i] = dividend > zero ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
        }
        continue;
      }
-     out[i] = floor(dividend / divisor);
+     out[i] = (T)floor(static_cast<double>(dividend) / static_cast<double>(divisor));
    }
  };
  CPUKernelUtils::ParallelFor(task, size);
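RealDiv, Div, and FloorDiv now share the same zero-divisor convention: 0/0 yields NaN, x/0 yields +/-infinity when the type has one, otherwise the type's max/min. A small Python sketch of that convention (safe_div and its parameters are illustrative; the C++ code gets has_infinity, t_max and t_min from std::numeric_limits<T>):

import math

def safe_div(dividend, divisor, has_infinity=True,
             t_max=float("inf"), t_min=float("-inf")):
    if divisor == 0:
        if dividend == 0:
            return math.nan                     # 0 / 0
        if has_infinity:
            return math.inf if dividend > 0 else -math.inf
        return t_max if dividend > 0 else t_min  # integer-like types
    return dividend / divisor

print(safe_div(0.0, 0.0))                         # nan
print(safe_div(5.0, 0.0))                         # inf
print(safe_div(-5, 0, False, 2**31 - 1, -2**31))  # -2147483648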
@@ -295,7 +298,7 @@ void ArithmeticCPUKernel::Atan2(const T *input1, const T *input2, T *out, size_t
    for (size_t i = start; i < end; i++) {
      std::vector<size_t> idx;
      GenIndex(i, &idx);
-     out[i] = atan2(input1[idx[0]], input2[idx[1]]);
+     out[i] = (T)atan2(static_cast<double>(input1[idx[0]]), static_cast<double>(input2[idx[1]]));
    }
  };
  CPUKernelUtils::ParallelFor(task, size);

@@ -348,8 +351,8 @@ void ArithmeticCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  CPUKernelUtils::GetElementNumEveryDim(input_shape0_, &input_element_num0_);
  CPUKernelUtils::GetElementNumEveryDim(input_shape1_, &input_element_num1_);
  CPUKernelUtils::GetElementNumEveryDim(output_shape_, &output_element_num_);
-  dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
-  if (dtype_ != AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 1)) {
+  dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
+  if (dtype_ != AnfAlgo::GetInputDeviceDataType(kernel_node, 1)) {
    MS_LOG(EXCEPTION) << "Input0 and input1 must have the same data type";
  }
  target_dtype_ = AnfAlgo::GetOutputInferDataType(kernel_node, 0);

@@ -358,14 +361,26 @@ bool ArithmeticCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
bool ArithmeticCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                 const std::vector<kernel::AddressPtr> & /*workspace*/,
                                 const std::vector<kernel::AddressPtr> &outputs) {
-  if (dtype_ == kNumberTypeInt32 || dtype_ == kNumberTypeInt16 || dtype_ == kNumberTypeInt8) {
+  if (dtype_ == kNumberTypeInt32) {
    LaunchKernel<int>(inputs, outputs);
-  } else if (dtype_ == kNumberTypeFloat32 || dtype_ == kNumberTypeFloat16 || dtype_ == kNumberTypeFloat64) {
+  } else if (dtype_ == kNumberTypeFloat32) {
    LaunchKernel<float>(inputs, outputs);
  } else if (dtype_ == kNumberTypeInt64) {
    LaunchKernel<int64_t>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeBool) {
+    LaunchKernelLogic<bool>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeInt8) {
+    LaunchKernel<int8_t>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeInt16) {
+    LaunchKernel<int16_t>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeFloat16) {
+    LaunchKernel<float16>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeFloat64) {
+    LaunchKernel<double>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeUInt8) {
+    LaunchKernel<uint8_t>(inputs, outputs);
+  } else if (dtype_ == kNumberTypeUInt32) {
+    LaunchKernel<uint32_t>(inputs, outputs);
  } else {
    MS_LOG(EXCEPTION) << "Data type " << TypeIdLabel(dtype_) << " is not supported.";
  }
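The widened Launch above is a chain of dtype tests that each instantiate the kernel at a different element type; the same shape is often written as a lookup table. A hypothetical Python sketch (the string keys stand in for the kNumberType* ids, and the returned strings stand in for the template instantiations):

launch_table = {
    "Int8": "LaunchKernel<int8_t>",   "Int16": "LaunchKernel<int16_t>",
    "Int32": "LaunchKernel<int>",     "Int64": "LaunchKernel<int64_t>",
    "UInt8": "LaunchKernel<uint8_t>", "UInt32": "LaunchKernel<uint32_t>",
    "Float16": "LaunchKernel<float16>", "Float32": "LaunchKernel<float>",
    "Float64": "LaunchKernel<double>",  "Bool": "LaunchKernelLogic<bool>",
}

def launch(dtype):
    # Unknown dtypes raise, mirroring the MS_LOG(EXCEPTION) branch.
    try:
        return launch_table[dtype]
    except KeyError:
        raise TypeError(f"Data type {dtype} is not supported.")

print(launch("Float16"))  # LaunchKernel<float16>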
@@ -30,7 +30,6 @@ void AssignCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  auto input_y_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-
  if (input_x_shape.size() != input_y_shape.size()) MS_LOG(EXCEPTION) << "x and y must have the same shape";
  for (size_t i = 0; i < input_x_shape.size(); ++i) {
    if (input_x_shape[i] != input_y_shape[i]) {

@@ -57,7 +57,7 @@ bool BiasAddCPUKernel::Launch(const std::vector<AddressPtr> &inputs, const std::
      size_t offset = n * c_size * hw_size + c * hw_size;
      size_t hw = 0;
#ifdef ENABLE_AVX
-     constexpr size_t C8NUM = 8;
+     const size_t C8NUM = 8;
      size_t hw8 = hw_size / C8NUM * C8NUM;
      const float *in_ptr = src_addr + offset;
      float *out_ptr = output_addr + offset;

@@ -1,113 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "backend/kernel_compiler/cpu/cache_swap_hashmap_cpu_kernel.h"
#include <string>
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {
template <typename T>
void Compress(HashmapEntry<T> *entry_p, const size_t &length, T entry) {
  T i = (entry + 1) % length, off = 1;
  for (; !entry_p[i].IsEmpty(); i = (i + 1) % length, off++) {
    if (entry_p[i].tag > off) {
      entry_p[entry].key = entry_p[i].key;
      entry_p[entry].value = entry_p[i].value;
      entry_p[entry].step = entry_p[i].step;
      entry_p[entry].tag = entry_p[i].tag - off;
      entry_p[i].SetEmpty();
      off = 0;
      entry = i;
    }
  }
}

void CacheSwapHashmapCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  auto hashmap_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  auto emb_idx_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);

  if (hashmap_shape.size() != 2) {
    MS_LOG(EXCEPTION) << "Dimension of HashMap must be 2, (n, 4)";
  }

  for (size_t i = 0; i < emb_idx_shape.size(); ++i) {
    batch_size_ *= emb_idx_shape[i];
  }

  hashmap_length_ = hashmap_shape[0];
  if (hashmap_length_ <= 0) {
    MS_LOG(EXCEPTION) << "Hashmap length must be > 0";
  }
  dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
}

bool CacheSwapHashmapCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                       const std::vector<kernel::AddressPtr> & /*workspace*/,
                                       const std::vector<kernel::AddressPtr> &outputs) {
  if (dtype_ == kNumberTypeInt32) {
    LaunchKernel<int>(inputs, outputs);
  } else if (dtype_ == kNumberTypeInt64) {
    LaunchKernel<int64_t>(inputs, outputs);
  } else {
    MS_LOG(ERROR) << "Only support int32, int64";
    return false;
  }
  return true;
}

template <typename T>
void CacheSwapHashmapCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
                                             const std::vector<kernel::AddressPtr> &outputs) {
  HashmapEntry<T> *hashmap = reinterpret_cast<HashmapEntry<T> *>(inputs[0]->addr);
  auto miss_emb_idx = reinterpret_cast<T *>(inputs[1]->addr);
  step_ = *reinterpret_cast<T *>(inputs[2]->addr);
  auto swap_cache_idx = reinterpret_cast<T *>(outputs[0]->addr);
  auto old_emb_idx = reinterpret_cast<T *>(outputs[1]->addr);

  for (size_t i = 0; i < batch_size_; ++i) {
    if (miss_emb_idx[i] < 0) {
      swap_cache_idx[i] = -1;
      old_emb_idx[i] = -1;
    } else {
      T emb_idx = miss_emb_idx[i];
      T entry = HashFunc(emb_idx, hashmap_length_);
      T tag_count = 1;
      while (!hashmap[entry].IsEmpty()) {
        entry = (entry + 1) % hashmap_length_;
        tag_count++;
      }

      hashmap[entry].key = emb_idx;
      hashmap[entry].step = step_;
      hashmap[entry].tag = tag_count;

      T tmp_entry = (entry + 1) % hashmap_length_;

      while (hashmap[tmp_entry].IsEmpty() || hashmap[tmp_entry].IsUsing(step_)) {
        tmp_entry = (tmp_entry + 1) % hashmap_length_;
      }

      swap_cache_idx[i] = hashmap[tmp_entry].value;
      old_emb_idx[i] = hashmap[tmp_entry].key;
      hashmap[entry].value = swap_cache_idx[i];
      hashmap[tmp_entry].SetEmpty();
      Compress(hashmap, hashmap_length_, tmp_entry);
    }
  }
}
}  // namespace kernel
}  // namespace mindspore
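The deleted kernel's Compress keeps linear-probe chains intact after a removal: each entry's tag records how many probes it sits from its home slot, and any later chain entry whose tag exceeds the gap distance is moved back into the freed slot. A small Python sketch of that compaction (compress and the (key, tag) tuples are illustrative; the real entries also carry value and step fields):

def compress(table, length, freed):
    i, off = (freed + 1) % length, 1
    while table[i] is not None:
        key, tag = table[i]
        if tag > off:
            table[freed] = (key, tag - off)  # move entry back by `off` slots
            table[i] = None
            freed, off = i, 0
        i, off = (i + 1) % length, off + 1

# Keys 0, 8, 16 all hash to slot 0 of a length-8 table; tag = probes used.
table = [(0, 1), (8, 2), (16, 3)] + [None] * 5
table[0] = None        # free the home slot
compress(table, 8, 0)
print(table[:3])       # [(8, 1), (16, 2), None] -- chain shifted back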
@@ -1,87 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_

#include <vector>
#include <memory>
#include <unordered_map>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
#include "backend/kernel_compiler/cpu/search_cache_idx_cpu_kernel.h"

namespace mindspore {
namespace kernel {
class CacheSwapHashmapCPUKernel : public CPUKernel {
 public:
  CacheSwapHashmapCPUKernel() = default;
  ~CacheSwapHashmapCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

  template <typename T>
  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);

 private:
  size_t batch_size_{1};
  size_t hashmap_length_{1};
  int64_t step_{0};

  TypeId dtype_{kTypeUnknown};
};

MS_REG_CPU_KERNEL(CacheSwapHashmap,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt32),
                  CacheSwapHashmapCPUKernel);

MS_REG_CPU_KERNEL(CacheSwapHashmap,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt64)
                    .AddOutputAttr(kNumberTypeInt64),
                  CacheSwapHashmapCPUKernel);

MS_REG_CPU_KERNEL(CacheSwapHashmap,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddOutputAttr(kNumberTypeInt64)
                    .AddOutputAttr(kNumberTypeInt64),
                  CacheSwapHashmapCPUKernel);

MS_REG_CPU_KERNEL(CacheSwapHashmap,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt32)
                    .AddInputAttr(kNumberTypeInt64)
                    .AddOutputAttr(kNumberTypeInt32)
                    .AddOutputAttr(kNumberTypeInt32),
                  CacheSwapHashmapCPUKernel);
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CACHE_SWAP_HASHMAP_CPU_KERNEL_H_

@@ -39,8 +39,7 @@ void CastCPUKernel<S, T>::InitKernel(const CNodePtr &kernel_node) {
}

template <typename S, typename T>
-bool CastCPUKernel<S, T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                                 const std::vector<kernel::AddressPtr> & /*workspace*/,
+bool CastCPUKernel<S, T>::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
                                 const std::vector<kernel::AddressPtr> &outputs) {
  S *input = reinterpret_cast<S *>(inputs[0]->addr);
  T *output = reinterpret_cast<T *>(outputs[0]->addr);

@@ -14,6 +14,8 @@
 * limitations under the License.
 */
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
+#include <algorithm>
+#include <utility>
#include "common/thread_pool.h"

namespace mindspore {

@@ -119,5 +121,118 @@ std::vector<size_t> CPUKernelUtils::FlatShapeByAxis(const std::vector<size_t> &s
  return flat_shape;
}

BroadcastIterator::BroadcastIterator(std::vector<size_t> input_shape_a, std::vector<size_t> input_shape_b,
                                     std::vector<size_t> output_shape)
    : input_shape_a_(std::move(input_shape_a)),
      input_shape_b_(std::move(input_shape_b)),
      output_shape_(std::move(output_shape)) {
  output_dimension_ = SizeToInt(output_shape_.size());  // Assign dimension to int for iterator
  BroadcastShape();
  // Allocate strides memory
  input_strides_a_.resize(output_dimension_);
  input_strides_b_.resize(output_dimension_);
  input_back_strides_a_.resize(output_dimension_);
  input_back_strides_b_.resize(output_dimension_);
  coordinates_.resize(output_dimension_);
  InitStrides();
}

void BroadcastIterator::SetPos(size_t pos) {
  for (int i = output_dimension_ - 1; i >= 0 && pos != 0; --i) {
    coordinates_[i] = pos % output_shape_[i];
    input_pos_[0] += coordinates_[i] * input_strides_a_[i];
    input_pos_[1] += coordinates_[i] * input_strides_b_[i];
    pos /= output_shape_[i];
  }
}

void BroadcastIterator::GenNextPos() {
  // Calculate output next coordinate
  for (int i = output_dimension_ - 1; i >= 0; --i) {
    if (coordinates_[i] + 1 == output_shape_[i]) {
      coordinates_[i] = 0;
      input_pos_[0] -= input_back_strides_a_[i];
      input_pos_[1] -= input_back_strides_b_[i];
    } else {
      ++coordinates_[i];
      input_pos_[0] += input_strides_a_[i];
      input_pos_[1] += input_strides_b_[i];
      break;
    }
  }
}

void BroadcastIterator::BroadcastShape() {
  int input_dimension_a = input_shape_a_.size();
  if (input_dimension_a < output_dimension_) {
    input_shape_a_.insert(input_shape_a_.begin(), output_dimension_ - input_dimension_a, 1);
  }

  int input_dimension_b = input_shape_b_.size();
  if (input_dimension_b < output_dimension_) {
    input_shape_b_.insert(input_shape_b_.begin(), output_dimension_ - input_dimension_b, 1);
  }
}

void BroadcastIterator::InitStrides() {
  input_strides_a_[output_dimension_ - 1] = 1;
  input_strides_b_[output_dimension_ - 1] = 1;
  for (int i = output_dimension_ - 2; i >= 0; --i) {
    input_strides_a_[i] = input_shape_a_[i + 1] * input_strides_a_[i + 1];
    input_strides_b_[i] = input_shape_b_[i + 1] * input_strides_b_[i + 1];
    input_back_strides_a_[i + 1] = (input_shape_a_[i + 1] - 1) * input_strides_a_[i + 1];
    input_back_strides_b_[i + 1] = (input_shape_b_[i + 1] - 1) * input_strides_b_[i + 1];
  }

  // Update strides for broadcast
  // While the axis value is 1, the stride is 0
  std::transform(input_strides_a_.begin(), input_strides_a_.end(), input_shape_a_.begin(), input_strides_a_.begin(),
                 [](const auto &a, const auto &b) { return b == 1 ? 0 : a; });
  std::transform(input_strides_b_.begin(), input_strides_b_.end(), input_shape_b_.begin(), input_strides_b_.begin(),
                 [](const auto &a, const auto &b) { return b == 1 ? 0 : a; });
}
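The stride trick in InitStrides is the usual zero-stride broadcast: a size-1 axis gets stride 0, so the same input element is re-read while the output coordinate advances. A Python sketch of the same computation (broadcast_strides is a hypothetical helper):

def broadcast_strides(shape, out_rank):
    # Left-pad with 1s, as BroadcastShape does.
    shape = [1] * (out_rank - len(shape)) + list(shape)
    strides = [0] * out_rank
    strides[-1] = 1
    for i in range(out_rank - 2, -1, -1):
        strides[i] = shape[i + 1] * strides[i + 1]
    # Zero out strides on broadcast (size-1) axes.
    return [0 if dim == 1 else s for dim, s in zip(shape, strides)]

# (3, 1) broadcast against (3, 4): axis 1 of the first input repeats.
print(broadcast_strides([3, 1], 2))  # [1, 0]
print(broadcast_strides([3, 4], 2))  # [4, 1]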

TransposeIterator::TransposeIterator(std::vector<size_t> output_shape, std::vector<size_t> axes,
                                     const std::vector<size_t> &input_shape)
    : shape_(std::move(output_shape)), axes_(std::move(axes)) {
  // Calculate strides
  dimension_ = shape_.size();
  std::vector<uint32_t> strides(dimension_, 1);
  for (int i = dimension_ - 2; i >= 0; --i) {
    strides[i] = input_shape[i + 1] * strides[i + 1];
  }

  // Swap shape and strides and calculate back strides
  strides_.resize(dimension_);
  back_strides_.resize(dimension_);
  for (int i = dimension_ - 1; i >= 0; --i) {
    strides_[i] = strides[axes_[i]];
    back_strides_[i] = (shape_[i] - 1) * strides_[i];
  }

  // Calculate coordinate by pos
  coordinates_.resize(dimension_);
}

void TransposeIterator::SetPos(size_t pos) {
  for (int i = dimension_ - 1; i >= 0 && pos != 0; --i) {
    coordinates_[i] = pos % shape_[i];
    pos_ += coordinates_[i] * strides_[i];
    pos /= shape_[i];
  }
}

void TransposeIterator::GenNextPos() {
  for (int i = dimension_ - 1; i >= 0; --i) {
    if (coordinates_[i] + 1 == shape_[i]) {
      coordinates_[i] = 0;
      pos_ -= back_strides_[i];
    } else {
      coordinates_[i]++;
      pos_ += strides_[i];
      break;
    }
  }
}
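TransposeIterator's constructor permutes the input strides by axes, so walking output coordinates in row-major order yields input flat offsets. A sketch of the same mapping (transposed_positions is illustrative):

from itertools import product

def transposed_positions(input_shape, axes):
    # Row-major strides of the input.
    in_strides = [1] * len(input_shape)
    for i in range(len(input_shape) - 2, -1, -1):
        in_strides[i] = input_shape[i + 1] * in_strides[i + 1]
    out_shape = [input_shape[a] for a in axes]
    strides = [in_strides[a] for a in axes]  # swap strides, as in the ctor
    return [sum(c * s for c, s in zip(coord, strides))
            for coord in product(*(range(d) for d in out_shape))]

# Transposing a 2x3 tensor: output element (i, j) reads input (j, i).
print(transposed_positions([2, 3], [1, 0]))  # [0, 3, 1, 4, 2, 5]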
}  // namespace kernel
}  // namespace mindspore

@@ -145,6 +145,50 @@ class CPUKernelUtils {
  static void ParallelFor(const CTask &task, size_t count);
  static std::vector<size_t> FlatShapeByAxis(const std::vector<size_t> &shape, int axis);
};

class BroadcastIterator {
 public:
  BroadcastIterator(std::vector<size_t> input_shape_a, std::vector<size_t> input_shape_b,
                    std::vector<size_t> output_shape);
  virtual ~BroadcastIterator() = default;
  inline size_t GetInputPosA() const { return input_pos_[0]; }
  inline size_t GetInputPosB() const { return input_pos_[1]; }
  void SetPos(size_t pos);
  void GenNextPos();

 private:
  void BroadcastShape();
  void InitStrides();

  std::vector<size_t> coordinates_;
  std::vector<size_t> input_shape_a_;
  std::vector<size_t> input_shape_b_;
  std::vector<size_t> output_shape_;
  std::vector<size_t> input_strides_a_;
  std::vector<size_t> input_strides_b_;
  std::vector<size_t> input_back_strides_a_;
  std::vector<size_t> input_back_strides_b_;
  std::array<size_t, 2> input_pos_{0};
  int output_dimension_{0};
};

class TransposeIterator {
 public:
  TransposeIterator(std::vector<size_t> output_shape, std::vector<size_t> axes, const std::vector<size_t> &input_shape);
  virtual ~TransposeIterator() = default;
  inline size_t GetPos() const { return pos_; }
  void SetPos(size_t pos);
  void GenNextPos();

 private:
  int dimension_{0};
  std::vector<size_t> coordinates_;
  std::vector<size_t> shape_;
  std::vector<size_t> strides_;
  std::vector<size_t> back_strides_;
  std::vector<size_t> axes_;
  size_t pos_{0};
};
}  // namespace kernel
}  // namespace mindspore

@@ -19,7 +19,6 @@

namespace mindspore {
namespace kernel {
-
void CTCLossCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  CheckParam(kernel_node);
  probs_shape_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
@@ -158,7 +157,6 @@ void CTCLossCPUKernel::CalculateGrad(const std::vector<uint32_t> &label_with_bla
                                     std::vector<std::vector<TT>> *dy) {
  auto dy_b = dy;
  TT kLogZero_ = -std::numeric_limits<TT>::infinity();
-
  if (log_pzx == kLogZero_) {
    MS_LOG(INFO) << "No valid path found";
    return;
@@ -181,7 +179,7 @@ void CTCLossCPUKernel::CalculateGrad(const std::vector<uint32_t> &label_with_bla
  }
}

-void CTCLossCPUKernel::GenLableWithBlank(uint32_t *seq_len, const std::vector<std::vector<uint32_t>> &batch_label,
+void CTCLossCPUKernel::GenLableWithBlank(const uint32_t *seq_len, const std::vector<std::vector<uint32_t>> &batch_label,
                                         std::vector<std::vector<uint32_t>> *label_with_blank) {
  for (size_t b = 0; b < batch_size_; ++b) {
    std::vector<uint32_t> l;
@@ -216,7 +214,7 @@ void CTCLossCPUKernel::GenLableWithBlank(uint32_t *seq_len, const std::vector<st
}

template <typename T>
-void InnerSoftMax(T *inputs_addr, std::vector<std::vector<T>> *softmax_probs, const uint32_t sequence_length,
+void InnerSoftMax(const T *inputs_addr, std::vector<std::vector<T>> *softmax_probs, const uint32_t sequence_length,
                  size_t num_class, size_t batch_size, size_t b) {
  for (size_t t = 0; t < sequence_length; ++t) {
    T maxCoeff(T(0));

@@ -36,7 +36,7 @@ class CTCLossCPUKernel : public CPUKernel {
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

-  void GenLableWithBlank(uint32_t *seq_len, const std::vector<std::vector<uint32_t>> &batch_label,
+  void GenLableWithBlank(const uint32_t *seq_len, const std::vector<std::vector<uint32_t>> &batch_label,
                         std::vector<std::vector<uint32_t>> *label_with_blank);

  template <typename T>
@@ -87,7 +87,6 @@ MS_REG_CPU_KERNEL(CTCLoss,
                  .AddOutputAttr(kNumberTypeFloat32)
                  .AddOutputAttr(kNumberTypeFloat32),
                  CTCLossCPUKernel);
-
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CTCLOSS_CPU_KERNEL_H_

@@ -54,7 +54,6 @@ MS_REG_CPU_KERNEL(
  Dropout,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
  DropoutCPUKernel);
-
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_DROPOUT_CPU_KERNEL_H_

@@ -14,15 +14,16 @@
 * limitations under the License.
 */
#include <cmath>
#include <string>
-#include <thread>
+#include <map>
#include "backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.h"
+#include "common/thread_pool.h"
#include "runtime/device/cpu/cpu_device_address.h"

namespace mindspore {
namespace kernel {

template <typename T>
-void EltWiseGradCPUKernel::ReluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::ReluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    if (input2[i] > 0) {
      out[i] = input1[i];
@@ -33,7 +34,7 @@ void EltWiseGradCPUKernel::ReluGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::ReLU6Grad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::ReLU6Grad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    if (input2[i] > 0 && input2[i] <= 6) {
      out[i] = input1[i];
@@ -44,7 +45,7 @@ void EltWiseGradCPUKernel::ReLU6Grad(const T *input1, const T *input2, T *out, s
}

template <typename T>
-void EltWiseGradCPUKernel::AbsGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::AbsGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    if (input1[i] > 0) {
      out[i] = input2[i];
@@ -57,21 +58,21 @@ void EltWiseGradCPUKernel::AbsGrad(const T *input1, const T *input2, T *out, siz
}

template <typename T>
-void EltWiseGradCPUKernel::SigmoidGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::SigmoidGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    out[i] = input2[i] * input1[i] * (1 - input1[i]);
  }
}

template <typename T>
-void EltWiseGradCPUKernel::SqrtGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::SqrtGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    out[i] = input2[i] / (input1[i] * 2);
  }
}

template <typename T>
-void EltWiseGradCPUKernel::TanhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::TanhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T tmp = input1[i] * input1[i];
    out[i] = input2[i] * (1 - tmp);
@@ -79,7 +80,7 @@ void EltWiseGradCPUKernel::TanhGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::GeluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::GeluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T x = input2[i];
    auto double_x = static_cast<T>(x);
@@ -91,7 +92,7 @@ void EltWiseGradCPUKernel::GeluGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::AsinGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::AsinGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T dividend = input2[i];
    T divisor = sqrt(1 - input1[i] * input1[i]);
@@ -112,7 +113,7 @@ void EltWiseGradCPUKernel::AsinGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::ACosGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::ACosGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T dividend = -input2[i];
    T divisor = sqrt(1 - input1[i] * input1[i]);
@@ -133,10 +134,10 @@ void EltWiseGradCPUKernel::ACosGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::AtanGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::AtanGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T dividend = input2[i];
-   T divisor = 1 + input1[i] * input1[i];
+   const T divisor = 1 + input1[i] * input1[i];
    if (divisor == 0) {
      if (dividend == 0) {
        out[i] = std::numeric_limits<T>::quiet_NaN();
@@ -154,7 +155,7 @@ void EltWiseGradCPUKernel::AtanGrad(const T *input1, const T *input2, T *out, si
}

template <typename T>
-void EltWiseGradCPUKernel::AsinhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::AsinhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T dividend = input2[i];
    T divisor = sqrt(1 + input1[i] * input1[i]);
@@ -175,7 +176,7 @@ void EltWiseGradCPUKernel::AsinhGrad(const T *input1, const T *input2, T *out, s
}

template <typename T>
-void EltWiseGradCPUKernel::AcoshGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+void EltWiseGradCPUKernel<T>::AcoshGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
  for (size_t i = start; i < end; i++) {
    T dividend = input2[i];
    T divisor = sqrt(input1[i] * input1[i] - 1);
@@ -195,132 +196,46 @@ void EltWiseGradCPUKernel::AcoshGrad(const T *input1, const T *input2, T *out, s
  }
}

-void EltWiseGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+template <typename T>
+void EltWiseGradCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
-  std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
-  if (kernel_name == "ReluGrad") {
-    operate_type_ = RELUGRAD;
-  } else if (kernel_name == "ReLU6Grad") {
-    operate_type_ = RELU6GRAD;
-  } else if (kernel_name == "SigmoidGrad") {
-    operate_type_ = SIGMOIDGRAD;
-  } else if (kernel_name == "AbsGrad") {
-    operate_type_ = ABSGRAD;
-  } else if (kernel_name == "TanhGrad") {
-    operate_type_ = TANHGRAD;
-  } else if (kernel_name == "SqrtGrad") {
-    operate_type_ = SQRTGRAD;
-  } else if (kernel_name == "GeLUGrad") {
-    operate_type_ = GELUGRAD;
-  } else if (kernel_name == "AsinGrad") {
-    operate_type_ = ASINGRAD;
-  } else if (kernel_name == "ACosGrad") {
-    operate_type_ = ACOSGRAD;
-  } else if (kernel_name == "AtanGrad") {
-    operate_type_ = ATANGRAD;
-  } else if (kernel_name == "AsinhGrad") {
-    operate_type_ = ASINHGRAD;
-  } else if (kernel_name == "AcoshGrad") {
-    operate_type_ = ACOSHGRAD;
-  } else {
-    MS_LOG(EXCEPTION) << "Not support " << kernel_name;
-  }
-
-  input_shape0_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-  input_shape1_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
-  output_shape_ = AnfAlgo::GetOutputInferShape(kernel_node, 0);
-  if (output_shape_.size() == 0) {
-    output_shape_.insert(output_shape_.begin(), 1);
-  }
-  size_t l = input_shape0_.size();
-  for (size_t i = 0; i < output_shape_.size() - l; ++i) {
-    input_shape0_.insert(input_shape0_.begin(), 1);
-  }
-  l = input_shape1_.size();
-  for (size_t i = 0; i < output_shape_.size() - l; ++i) {
-    input_shape1_.insert(input_shape1_.begin(), 1);
-  }
-  CPUKernelUtils::GetElementNumEveryDim(input_shape0_, &input_element_num0_);
-  CPUKernelUtils::GetElementNumEveryDim(input_shape1_, &input_element_num1_);
-  CPUKernelUtils::GetElementNumEveryDim(output_shape_, &output_element_num_);
-  dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
-  if (dtype_ != AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 1)) {
-    MS_LOG(EXCEPTION) << "Input0 and input1 must has the same data type";
-  }
-}
-
-bool EltWiseGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                                  const std::vector<kernel::AddressPtr> & /*workspace*/,
-                                  const std::vector<kernel::AddressPtr> &outputs) {
-  if (dtype_ == kNumberTypeInt32 || dtype_ == kNumberTypeInt16) {
-    LaunchKernel<int>(inputs, outputs);
-  } else if (dtype_ == kNumberTypeFloat32 || dtype_ == kNumberTypeFloat16 || dtype_ == kNumberTypeFloat64) {
-    LaunchKernel<float>(inputs, outputs);
-  } else if (dtype_ == kNumberTypeInt64) {
-    LaunchKernel<int64_t>(inputs, outputs);
-  } else {
-    MS_LOG(EXCEPTION) << "Data type is " << TypeIdLabel(dtype_) << "is not support.";
-  }
-  return true;
+  kernel_name_ = AnfAlgo::GetCNodeName(kernel_node);
}

template <typename T>
-void EltWiseGradCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) {
+bool EltWiseGradCPUKernel<T>::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                     const std::vector<kernel::AddressPtr> & /*workspace*/,
+                                     const std::vector<kernel::AddressPtr> &outputs) {
+  static const std::map<std::string,
+                        std::function<void(EltWiseGradCPUKernel *, const T *, const T *, T *, size_t, size_t)>>
+    elt_map{{"ReluGrad", &EltWiseGradCPUKernel<T>::ReluGrad},       {"ReLU6Grad", &EltWiseGradCPUKernel<T>::ReLU6Grad},
+            {"SigmoidGrad", &EltWiseGradCPUKernel<T>::SigmoidGrad}, {"AbsGrad", &EltWiseGradCPUKernel<T>::AbsGrad},
+            {"TanhGrad", &EltWiseGradCPUKernel<T>::TanhGrad},       {"SqrtGrad", &EltWiseGradCPUKernel<T>::SqrtGrad},
+            {"GeLUGrad", &EltWiseGradCPUKernel<T>::GeluGrad},       {"AsinGrad", &EltWiseGradCPUKernel<T>::AsinGrad},
+            {"ACosGrad", &EltWiseGradCPUKernel<T>::ACosGrad},       {"AtanGrad", &EltWiseGradCPUKernel<T>::AtanGrad},
+            {"AsinhGrad", &EltWiseGradCPUKernel<T>::AsinhGrad},     {"AcoshGrad", &EltWiseGradCPUKernel<T>::AcoshGrad}};
  T *input1 = reinterpret_cast<T *>(inputs[0]->addr);
  T *input2 = reinterpret_cast<T *>(inputs[1]->addr);
  T *output = reinterpret_cast<T *>(outputs[0]->addr);

-  size_t lens = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
-  auto max_thread_num = std::thread::hardware_concurrency();
-  size_t thread_num = lens < 128 * max_thread_num ? std::ceil(lens / 128.0) : max_thread_num;
-  MS_LOG(INFO) << "Lens=" << lens << "; use thread_num=" << thread_num << "; max_thread_num: " << max_thread_num;
-  std::vector<std::thread> threads;
-  if (thread_num < 1) {
-    MS_LOG(ERROR) << "Invalid value: thread_num " << thread_num;
-    return;
-  }
-  threads.reserve(thread_num);
+  size_t count = outputs[0]->size > 0 ? static_cast<size_t>(outputs[0]->size / sizeof(T)) : 1;
+  auto max_thread_num = common::ThreadPool::GetInstance().GetSyncRunThreadNum();
+  const float block_size = 128.0;
+  size_t thread_num = count < block_size * max_thread_num ? std::ceil(count / block_size) : max_thread_num;
+  std::vector<common::Task> tasks;
  size_t start = 0;
-  size_t once_compute_size = (lens + thread_num - 1) / thread_num;
-  if (once_compute_size < 1) {
-    MS_LOG(ERROR) << "Invalid value: once_compute_size " << once_compute_size;
-    return;
-  }
-  while (start < lens) {
-    size_t end = (start + once_compute_size) > lens ? lens : (start + once_compute_size);
-    if (operate_type_ == RELUGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::ReluGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == RELU6GRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::ReLU6Grad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ABSGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AbsGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == SIGMOIDGRAD) {
-      threads.emplace_back(
-        std::thread(&EltWiseGradCPUKernel::SigmoidGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == TANHGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::TanhGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == SQRTGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::SqrtGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == GELUGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::GeluGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ASINGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AsinGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ACOSGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::ACosGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ATANGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AtanGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ASINHGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AsinhGrad<T>, this, input1, input2, output, start, end));
-    } else if (operate_type_ == ACOSHGRAD) {
-      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AcoshGrad<T>, this, input1, input2, output, start, end));
-    } else {
-      MS_LOG(EXCEPTION) << "Not support " << operate_type_;
-    }
+  size_t once_compute_size = (count + thread_num - 1) / thread_num;
+  while (start < count) {
+    size_t end = (start + once_compute_size) > count ? count : (start + once_compute_size);
+    auto block = [&, start, end]() {
+      elt_map.at(kernel_name_)(this, input1, input2, output, start, end);
+      return common::SUCCESS;
+    };
+    tasks.emplace_back(block);
    start += once_compute_size;
  }
-  for (size_t i = 0; i < threads.size(); ++i) {
-    threads[i].join();
-  }
+  common::ThreadPool::GetInstance().SyncRun(tasks);
  return true;
}
}  // namespace kernel
}  // namespace mindspore
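The new Launch partitions the element range into roughly 128-element blocks, capped at the thread pool's size, exactly as the thread_num/once_compute_size arithmetic above. A sketch of that partitioning (make_ranges is a hypothetical helper):

import math

def make_ranges(count, max_thread_num, block_size=128.0):
    # Aim for ~block_size elements per task, capped at the pool size.
    thread_num = (math.ceil(count / block_size)
                  if count < block_size * max_thread_num else max_thread_num)
    once = (count + thread_num - 1) // thread_num
    return [(s, min(s + once, count)) for s in range(0, count, once)]

print(make_ranges(1000, 8))  # [(0, 125), (125, 250), ..., (875, 1000)]
print(make_ranges(100, 8))   # [(0, 100)] -- small inputs stay on one task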
@@ -18,11 +18,13 @@
#include <memory>
#include <vector>
#include <limits>
+#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
+template <typename T>
class EltWiseGradCPUKernel : public CPUKernel {
 public:
  EltWiseGradCPUKernel() = default;
@@ -32,95 +34,75 @@ class EltWiseGradCPUKernel : public CPUKernel {

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;
-  template <typename T>
-  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);

 private:
-  template <typename T>
  void ReluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void ReLU6Grad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void AbsGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void SigmoidGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void SqrtGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void TanhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void GeluGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void AsinGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void ACosGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void AtanGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void AsinhGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  template <typename T>
  void AcoshGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
-  std::vector<size_t> input_shape0_;
-  std::vector<size_t> input_shape1_;
-  std::vector<size_t> input_element_num0_;
-  std::vector<size_t> input_element_num1_;
-  std::vector<size_t> output_shape_;
-  std::vector<size_t> output_element_num_;
-  OperateType operate_type_{RELUGRAD};
-  TypeId dtype_{kTypeUnknown};
+  std::string kernel_name_ = "";
};

-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  ReluGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  ReLU6Grad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  AbsGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  SigmoidGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  SqrtGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  TanhGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(GeLUGrad,
-                  KernelAttr()
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32),
-                  EltWiseGradCPUKernel);
+MS_REG_CPU_KERNEL_T(GeLUGrad,
+                    KernelAttr()
+                      .AddInputAttr(kNumberTypeFloat32)
+                      .AddInputAttr(kNumberTypeFloat32)
+                      .AddInputAttr(kNumberTypeFloat32)
+                      .AddOutputAttr(kNumberTypeFloat32),
+                    EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  AsinGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  ACosGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  AtanGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  AsinhGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
-MS_REG_CPU_KERNEL(
+MS_REG_CPU_KERNEL_T(
  AcoshGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
-  EltWiseGradCPUKernel);
+  EltWiseGradCPUKernel, float);
}  // namespace kernel
}  // namespace mindspore

@@ -28,7 +28,7 @@ size_t get_element_num(const std::vector<size_t> &shape) {
}

template <typename T, typename I>
-void CopyTask(size_t cur, std::vector<size_t> *pos, T *input, I *index, const int &dim, T *output,
+void CopyTask(size_t cur, std::vector<size_t> *pos, T *input, const I *index, const int &dim, T *output,
              const std::vector<size_t> &output_shape, const std::vector<size_t> &out_cargo_size,
              const std::vector<size_t> &input_cargo_size, bool reverse) {
  for (size_t i = 0; i < output_shape[cur]; ++i) {
@@ -65,7 +65,6 @@ template <typename T, typename I>
void GatherDCPUKernel<T, I>::InitKernel(const CNodePtr &kernel_node) {
  input_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
  index_shape_ = AnfAlgo::GetInputDeviceShape(kernel_node, 2);
-
  if (input_shape_.size() != index_shape_.size()) {
    MS_LOG(EXCEPTION) << "Invalid shape size, shape size of input: " << input_shape_.size()
                      << ", and index: " << index_shape_.size() << " should be equal";
@@ -81,7 +80,6 @@ bool GatherDCPUKernel<T, I>::Launch(const std::vector<kernel::AddressPtr> &input
  size_t index_size = get_element_num(index_shape_) * sizeof(I);
  size_t dim_size = sizeof(int);
  size_t output_size = get_element_num(output_shape_) * sizeof(T);
-
  if (inputs[0]->size != input_size || inputs[1]->size != dim_size || inputs[2]->size != index_size ||
      outputs[0]->size != output_size) {
    MS_LOG(EXCEPTION) << "invalid input or output data size!";
@@ -92,7 +90,6 @@ bool GatherDCPUKernel<T, I>::Launch(const std::vector<kernel::AddressPtr> &input
  auto index = reinterpret_cast<I *>(inputs[2]->addr);
  auto output = reinterpret_cast<T *>(outputs[0]->addr);
  int32_t input_rank = SizeToInt(input_shape_.size());
-
  if (dim[0] >= input_rank || dim[0] < -input_rank) {
    MS_LOG(EXCEPTION) << "The value of 'dim' should be in [" << -input_rank << ", " << input_rank
                      << "], but got: " << dim[0];

@@ -37,7 +37,6 @@ class GatherDCPUKernel : public CPUKernel {
  std::vector<size_t> input_shape_;
  std::vector<size_t> index_shape_;
  std::vector<size_t> output_shape_;
-  int32_t axis_;
};

MS_REG_CPU_KERNEL_T_S(GatherD,

@@ -15,10 +15,10 @@
 */
#include "backend/kernel_compiler/cpu/gathernd_cpu_kernel.h"
#include "runtime/device/cpu/cpu_device_address.h"
+#define MAX_INT (((unsigned int)(-1)) >> 1)

namespace mindspore {
namespace kernel {
-
void GatherNdCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  input_shapes_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  indices_shapes_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
@@ -83,11 +83,14 @@ bool GatherNdCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, cons
  size_t output_dim1 = dims_[1];
  size_t indices_dim1 = dims_[2];

-  int num = output_dim0 * output_dim1;
+  size_t num = output_dim0 * output_dim1;
+  if (num > MAX_INT) {
+    MS_LOG(EXCEPTION) << "Exceed MAX_INT: " << MAX_INT << ", dim0: " << output_dim0 << ", dim1: " << output_dim1;
+  }

-  for (int write_index = 0; write_index < num; write_index++) {
-    int i = write_index / output_dim1 % output_dim0;
-    int j = write_index % output_dim1;
+  for (size_t write_index = 0; write_index < num; write_index++) {
+    size_t i = write_index / output_dim1 % output_dim0;
+    size_t j = write_index % output_dim1;

    int read_index = 0;
    for (size_t k = 0; k < indices_dim1; k++) {

@@ -90,6 +90,5 @@ void IsFiniteCPUKernel::LaunchKernelOther(const std::vector<AddressPtr> &inputs,
    output[i] = true;
  }
}
-
}  // namespace kernel
}  // namespace mindspore

@@ -86,7 +86,6 @@ MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutp

MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeBool),
                  IsFiniteCPUKernel);
-
}  // namespace kernel
}  // namespace mindspore

@@ -63,7 +63,7 @@ void MaximumGradRecTask(T *x, T *y, T *dout, T *dx, T *dy, size_t dim, size_t x_
      size_t dout_i = i * dout_cargo[dim];

      if (dim == dout_shape.size() - 1) {
-       if (*(x + x_index + x_i) >= *(y + y_index + y_i)) {
+       if (*(x + x_index + x_i) > *(y + y_index + y_i)) {
          *(dx + x_index + x_i) += *(dout + dout_index + i);
        } else {
          *(dy + y_index + y_i) += *(dout + dout_index + i);
|
|
|
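The >= to > change only matters on ties: when x equals y, the whole incoming gradient used to be routed to x, and with the strict comparison it now goes to y. A toy Python illustration (route_grad is hypothetical):

def route_grad(x, y, dout, strict):
    dx = dy = 0.0
    if (x > y) if strict else (x >= y):
        dx += dout
    else:
        dy += dout
    return dx, dy

print(route_grad(2.0, 2.0, 1.0, strict=False))  # (1.0, 0.0) old behaviour
print(route_grad(2.0, 2.0, 1.0, strict=True))   # (0.0, 1.0) new behaviour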
@@ -19,7 +19,6 @@

namespace mindspore {
namespace kernel {
-
template <typename T>
void MinimumCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
  CheckParam(kernel_node);
@@ -147,7 +146,7 @@ void MinimumCPUKernel<T>::InitTensorBroadcastShape() {
  }
}

-// Broadcast comparation
+// Broadcast comparison
template <typename T>
size_t MinimumCPUKernel<T>::Index(const size_t &index, const size_t &dim) {
  return dim == 1 ? 0 : index;
@@ -216,6 +215,5 @@ void MinimumCPUKernel<T>::BroadcastArithTensors(const T *input_x, const T *input
    output[i] = MinimumFunc(input_x[i], input_y[i]);
  }
}
-
}  // namespace kernel
}  // namespace mindspore

@@ -83,10 +83,11 @@ bool MinimumGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
}

template <typename T>
-void MinimumGradRecTask(T *x, T *y, T *dout, T *dx, T *dy, size_t dim, size_t x_index, size_t y_index,
-                        size_t dout_index, const std::vector<size_t> &x_cargo, const std::vector<size_t> &y_cargo,
-                        const std::vector<size_t> &dout_cargo, const std::vector<size_t> &x_shape,
-                        const std::vector<size_t> &y_shape, const std::vector<size_t> &dout_shape) {
+void MinimumGradRecTask(const T *x, const T *y, const T *dout, T *dx, T *dy, const size_t dim, const size_t x_index,
+                        const size_t y_index, const size_t dout_index, const std::vector<size_t> &x_cargo,
+                        const std::vector<size_t> &y_cargo, const std::vector<size_t> &dout_cargo,
+                        const std::vector<size_t> &x_shape, const std::vector<size_t> &y_shape,
+                        const std::vector<size_t> &dout_shape) {
  for (size_t i = 0; i < dout_shape[dim]; i++) {
    size_t x_i = x_shape[dim] == dout_shape[dim] ? i * x_cargo[dim] : 0;
    size_t y_i = y_shape[dim] == dout_shape[dim] ? i * y_cargo[dim] : 0;

@@ -115,8 +116,8 @@ void MinimumGradCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, c

  size_t x_tensor_len = GetTensorLen(x_shape_);
  size_t y_tensor_len = GetTensorLen(y_shape_);
-  memset(dx_addr, 0, x_tensor_len * sizeof(T));
-  memset(dy_addr, 0, y_tensor_len * sizeof(T));
+  memset_s(dx_addr, x_tensor_len * sizeof(T), 0x00, x_tensor_len * sizeof(T));
+  memset_s(dy_addr, y_tensor_len * sizeof(T), 0x00, y_tensor_len * sizeof(T));

  std::vector<size_t> x_shape(dout_shape.size(), 1);
  std::vector<size_t> y_shape(dout_shape.size(), 1);

@@ -187,6 +187,5 @@ void MirrorPadCPUKernel::CheckParam(const CNodePtr &kernel_node) {
    MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but MirrorPadCPUKernel needs 1 output.";
  }
}
-
}  // namespace kernel
}  // namespace mindspore

@@ -136,7 +136,7 @@ void MirrorPadGradCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
 }

 template <typename T>
-void MirrorPadGradCPUKernel::MirrorPadGrad_Width_Height(const size_t size, const T *dy, T *interim_dy,
+void MirrorPadGradCPUKernel::MirrorPadGrad_Width_Height(const size_t size, const T *dy, const T *interim_dy,
                                                         const int dx_batches, const int dx_channels,
                                                         const int dx_height, const int dx_width, const int dy_height,
                                                         const int dy_width, const int padd_dim,
@@ -58,14 +58,14 @@ class MirrorPadGradCPUKernel : public CPUKernel {
                   const std::vector<AddressPtr> &outputs);

   template <typename T>
-  void MirrorPadGrad_Width_Height(const size_t size, const T *dy, T *interim_dy, const int dx_batches,
+  void MirrorPadGrad_Width_Height(const size_t size, const T *dy, const T *interim_dy, const int dx_batches,
                                   const int dx_channels, const int dx_height, const int dx_width, const int dy_height,
                                   const int dy_width, const int padd_dim, const int64_t *paddings_arg, int mode, T *dx);

   template <typename T>
   void MirrorPadGradBatchChannel(const size_t size, T *dy, T *interim_dy, const int dx_batches, const int dx_channels,
                                  const int dx_height, const int dx_width, const int dy_height, const int dy_width,
-                                 const int padd_dim, const int64_t *paddings_arg, int mode, T *dx);
+                                 const int padd_dim, const int64_t *paddings_arg, int mode, T *const dx);

  private:
   void CheckParam(const CNodePtr &kernel_node);
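Two different const placements appear in this header and are easy to conflate: const T *interim_dy makes the pointed-to data read-only, while T *const dx fixes the pointer itself but leaves the data writable. A compact illustration (function invented for demonstration):

// const float *src : data read-only through src; src itself may be reassigned.
// float *const dst : dst cannot be repointed, but its data is writable.
void ConstDemo(const float *src, float *const dst) {
  dst[0] = src[0];  // fine: dst's pointee is mutable
  src = dst;        // fine: src is a non-const pointer to const data
  // *src = 1.0f;   // error: pointee is const
  // dst = src;     // error: dst is a const pointer
}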
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,14 +14,14 @@
 * limitations under the License.
 */
 #include <string>
-#include "backend/kernel_compiler/cpu/mkldnn/fused_batch_norm_cpu_kernel.h"
+#include "backend/kernel_compiler/cpu/mkldnn/batch_norm_cpu_kernel.h"
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
 #include "runtime/device/cpu/cpu_device_address.h"
 #include "utils/ms_utils.h"

 namespace mindspore {
 namespace kernel {
-void FusedBatchNormCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
+void BatchNormCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
   CPUKernel::InitInputOutputSize(kernel_node);
   MS_EXCEPTION_IF_NULL(kernel_node);
   size_t type_size = sizeof(float);
@@ -30,16 +30,13 @@ void FusedBatchNormCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
   workspace_size_list_.emplace_back(tensor_size);
 }

-void FusedBatchNormCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+void BatchNormCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   MS_EXCEPTION_IF_NULL(kernel_node);
-  auto node_name = AnfAlgo::GetCNodeName(kernel_node);
-  if (node_name == "FusedBatchNorm") {
-    momentum = AnfAlgo::GetNodeAttr<float>(kernel_node, "momentum");
-    is_train = true;
-  }
+  is_train = AnfAlgo::GetNodeAttr<bool>(kernel_node, "is_training");
+  momentum = AnfAlgo::GetNodeAttr<float>(kernel_node, "momentum");
   std::vector<size_t> x_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
   if (x_shape.size() != 4) {
-    MS_LOG(EXCEPTION) << "Fused batchnorm only support nchw input!";
+    MS_LOG(EXCEPTION) << "Batchnorm only support nchw input!";
   }
   batch_size = x_shape[0];
   channel = x_shape[1];
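The behavioral change here: training mode and momentum are no longer inferred from the node name but read from the node's is_training and momentum attributes, so one kernel class covers BatchNorm in both training and inference graphs. For orientation, a sketch of what the flag typically gates (standard batch-norm semantics, not code from this commit; the exact momentum convention varies between frameworks):

// Sketch: per-channel running-statistics update gated by is_train.
// NOTE: the blend direction shown is one common convention only.
struct RunningStats {
  float mean;
  float var;
};

void UpdateRunningStats(RunningStats *s, float batch_mean, float batch_var, float momentum, bool is_train) {
  if (!is_train) return;  // inference: running stats are only read
  s->mean = momentum * batch_mean + (1.0f - momentum) * s->mean;
  s->var = momentum * batch_var + (1.0f - momentum) * s->var;
}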
@@ -66,9 +63,9 @@ void FusedBatchNormCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   AddArgument(DNNL_ARG_DST, x_desc);
 }

-bool FusedBatchNormCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                                     const std::vector<kernel::AddressPtr> &workspace,
-                                     const std::vector<kernel::AddressPtr> &outputs) {
+bool BatchNormCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                const std::vector<kernel::AddressPtr> &workspace,
+                                const std::vector<kernel::AddressPtr> &outputs) {
   if (inputs.size() < 5 || outputs.empty()) {
     MS_LOG(EXCEPTION) << "Error input output size!";
   }
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -13,18 +13,18 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FUSED_BATCH_NORM_CPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FUSED_BATCH_NORM_CPU_KERNEL_H_
+#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BATCH_NORM_CPU_KERNEL_H_
+#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BATCH_NORM_CPU_KERNEL_H_
 #include <memory>
 #include <vector>
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.h"

 namespace mindspore {
 namespace kernel {
-class FusedBatchNormCPUKernel : public MKLCPUKernel {
+class BatchNormCPUKernel : public MKLCPUKernel {
  public:
-  FusedBatchNormCPUKernel() = default;
-  ~FusedBatchNormCPUKernel() override = default;
+  BatchNormCPUKernel() = default;
+  ~BatchNormCPUKernel() override = default;

   void InitKernel(const CNodePtr &kernel_node) override;

@@ -43,20 +43,6 @@ class FusedBatchNormCPUKernel : public MKLCPUKernel {
   size_t nhw_size{0};
 };

-MS_REG_CPU_KERNEL(FusedBatchNorm,
-                  KernelAttr()
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddInputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32)
-                    .AddOutputAttr(kNumberTypeFloat32),
-                  FusedBatchNormCPUKernel)
-
 MS_REG_CPU_KERNEL(BatchNorm,
                   KernelAttr()
                     .AddInputAttr(kNumberTypeFloat32)
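MS_REG_CPU_KERNEL is what makes an op resolvable on CPU: it ties an op name to a dtype signature (KernelAttr) and a kernel class. Deleting the block above therefore removes the FusedBatchNorm binding entirely, while BatchNorm keeps its own. The general shape of a registration, with a hypothetical single-input op for illustration:

// Hypothetical registration (op name and class invented): one float32 input,
// one float32 output, served by MyOpCPUKernel.
MS_REG_CPU_KERNEL(MyOp, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                  MyOpCPUKernel);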
@@ -69,7 +55,7 @@ MS_REG_CPU_KERNEL(BatchNorm,
                     .AddOutputAttr(kNumberTypeFloat32)
                     .AddOutputAttr(kNumberTypeFloat32)
                     .AddOutputAttr(kNumberTypeFloat32),
-                  FusedBatchNormCPUKernel)
+                  BatchNormCPUKernel)
 }  // namespace kernel
 }  // namespace mindspore
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-#include "backend/kernel_compiler/cpu/mkldnn/fused_batch_norm_gard_cpu_kernel.h"
+#include "backend/kernel_compiler/cpu/mkldnn/batch_norm_gard_cpu_kernel.h"

 #include <string>
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
@@ -22,19 +22,20 @@

 namespace mindspore {
 namespace kernel {
-void FusedBatchNormGradCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
+void BatchNormGradCPUKernel::InitInputOutputSize(const CNodePtr &kernel_node) {
   CPUKernel::InitInputOutputSize(kernel_node);
   MS_EXCEPTION_IF_NULL(kernel_node);
   size_t type_size = sizeof(float);
   std::vector<size_t> shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
   size_t tensor_size = shape[1] * 2 * type_size;
+  input_size_list_.pop_back();
   // [2, c] to store scale and bias
   workspace_size_list_.emplace_back(tensor_size);
   // [2, c] to store diff_scale and diff_bias
   workspace_size_list_.emplace_back(tensor_size);
 }

-void FusedBatchNormGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+void BatchNormGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   MS_EXCEPTION_IF_NULL(kernel_node);
   std::vector<size_t> x_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
   if (x_shape.size() != 4) {
@@ -72,25 +73,25 @@ void FusedBatchNormGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   AddArgument(DNNL_ARG_DIFF_SCALE_SHIFT, scale_bias_desc);
 }

-bool FusedBatchNormGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
-                                         const std::vector<kernel::AddressPtr> &workspace,
-                                         const std::vector<kernel::AddressPtr> &outputs) {
+bool BatchNormGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                    const std::vector<kernel::AddressPtr> &workspace,
+                                    const std::vector<kernel::AddressPtr> &outputs) {
   if (inputs.size() < 5 || outputs.empty()) {
     MS_LOG(EXCEPTION) << "Error input output size!";
   }
   auto wksp_in = reinterpret_cast<float *>(workspace[0]->addr);
   auto scale_ret = memcpy_s(wksp_in, workspace[0]->size, inputs[2]->addr, inputs[2]->size);
   auto max_size = workspace[0]->size - inputs[2]->size;
-  auto bias_ret = memcpy_s(wksp_in + (inputs[2]->size / sizeof(float)), max_size, inputs[3]->addr, inputs[3]->size);
-  if (scale_ret != 0 || bias_ret != 0) {
+  auto bias_ret = memset_s(wksp_in + (inputs[2]->size / sizeof(float)), max_size, 0., max_size);
+  if (scale_ret != 0 && bias_ret != 0) {
     MS_LOG(EXCEPTION) << "Memcpy_s error.";
     return false;
   }

   SetArgumentHandle(DNNL_ARG_DIFF_DST, inputs[0]->addr);
   SetArgumentHandle(DNNL_ARG_SRC, inputs[1]->addr);
-  SetArgumentHandle(DNNL_ARG_MEAN, inputs[4]->addr);
-  SetArgumentHandle(DNNL_ARG_VARIANCE, inputs[5]->addr);
+  SetArgumentHandle(DNNL_ARG_MEAN, inputs[3]->addr);
+  SetArgumentHandle(DNNL_ARG_VARIANCE, inputs[4]->addr);
   SetArgumentHandle(DNNL_ARG_SCALE_SHIFT, workspace[0]->addr);
   SetArgumentHandle(DNNL_ARG_DIFF_SRC, outputs[0]->addr);
   SetArgumentHandle(DNNL_ARG_DIFF_SCALE_SHIFT, workspace[1]->addr);
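The index shift on DNNL_ARG_MEAN / DNNL_ARG_VARIANCE follows from the new input layout: bias is no longer materialized as a separate input (its half of the SCALE_SHIFT workspace is zero-filled by the memset_s above), so mean and variance each move up one slot. The apparent layouts, reconstructed from the argument wiring in this hunk:

// Old inputs: [0]=dy  [1]=x  [2]=scale  [3]=bias  [4]=mean  [5]=variance
// New inputs: [0]=dy  [1]=x  [2]=scale  [3]=mean  [4]=variance
//             (the bias half of workspace[0] is synthesized as zeros)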
@@ -99,7 +100,7 @@ bool FusedBatchNormGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &
   auto wksp_out = reinterpret_cast<float *>(workspace[1]->addr);
   auto diff_scale_ret = memcpy_s(outputs[1]->addr, outputs[1]->size, wksp_out, inputs[2]->size);
   auto diff_bias_ret =
-    memcpy_s(outputs[2]->addr, outputs[2]->size, wksp_out + (outputs[1]->size / sizeof(float)), inputs[3]->size);
+    memcpy_s(outputs[2]->addr, outputs[2]->size, wksp_out + (outputs[1]->size / sizeof(float)), outputs[2]->size);
   if (diff_scale_ret != 0 || diff_bias_ret != 0) {
     MS_LOG(EXCEPTION) << "Memcpy_s error.";
     return false;
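Bounding the copy count by the destination (outputs[2]->size) instead of an unrelated input size is the intended memcpy_s idiom: the count must never exceed the capacity given in the second argument, or the call fails. A sketch of that bounded-copy pattern (securec header assumed, as above):

#include <cstddef>
#include "securec.h"  // assumed: errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count)

// Sketch: copy at most dst_size bytes; a nonzero return signals failure
// instead of a silent overflow.
bool BoundedCopy(void *dst, std::size_t dst_size, const void *src, std::size_t count) {
  return count <= dst_size && memcpy_s(dst, dst_size, src, count) == 0;
}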
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -13,18 +13,18 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FUSED_BATCH_NORM_GRAD_CPU_KERNEL_H_
-#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_FUSED_BATCH_NORM_GRAD_CPU_KERNEL_H_
+#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BATCH_NORM_GRAD_CPU_KERNEL_H_
+#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BATCH_NORM_GRAD_CPU_KERNEL_H_
 #include <memory>
 #include <vector>
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.h"

 namespace mindspore {
 namespace kernel {
-class FusedBatchNormGradCPUKernel : public MKLCPUKernel {
+class BatchNormGradCPUKernel : public MKLCPUKernel {
  public:
-  FusedBatchNormGradCPUKernel() = default;
-  ~FusedBatchNormGradCPUKernel() override = default;
+  BatchNormGradCPUKernel() = default;
+  ~BatchNormGradCPUKernel() override = default;

   void InitKernel(const CNodePtr &kernel_node) override;

@@ -42,7 +42,7 @@ class FusedBatchNormGradCPUKernel : public MKLCPUKernel {
   size_t nhw_size{0};
 };

-MS_REG_CPU_KERNEL(FusedBatchNormGradCPU,
+MS_REG_CPU_KERNEL(BatchNormGrad,
                   KernelAttr()
                     .AddInputAttr(kNumberTypeFloat32)
                     .AddInputAttr(kNumberTypeFloat32)
@@ -53,7 +53,7 @@ MS_REG_CPU_KERNEL(FusedBatchNormGradCPU,
                     .AddOutputAttr(kNumberTypeFloat32)
                     .AddOutputAttr(kNumberTypeFloat32)
                     .AddOutputAttr(kNumberTypeFloat32),
-                  FusedBatchNormGradCPUKernel)
+                  BatchNormGradCPUKernel)
 }  // namespace kernel
 }  // namespace mindspore
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "backend/kernel_compiler/cpu/mkldnn/log_softmax_cpu_kernel.h"
+#include <algorithm>
+#include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
+#include "runtime/device/cpu/cpu_device_address.h"
+#include "utils/ms_utils.h"
+
+namespace mindspore {
+namespace kernel {
+void LogSoftmaxCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+  MS_EXCEPTION_IF_NULL(kernel_node);
+  std::vector<size_t> src_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
+  int axis = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, AXIS);
+  if (axis >= SizeToInt(src_shape.size())) {
+    axis = SizeToInt(src_shape.size()) - 1;
+  }
+  while (axis < 0) {
+    axis += SizeToInt(src_shape.size());
+  }
+  dnnl::memory::desc src_desc = GetDefaultMemDesc(src_shape);
+  dnnl::logsoftmax_forward::desc desc =
+    dnnl::logsoftmax_forward::desc(dnnl::prop_kind::forward_training, src_desc, axis);
+  auto prim_desc = dnnl::logsoftmax_forward::primitive_desc(desc, MKLKernelEngine::Get().engine());
+  primitive_ = std::make_shared<dnnl::logsoftmax_forward>(prim_desc);
+  AddArgument(DNNL_ARG_SRC, src_desc);
+  AddArgument(DNNL_ARG_DST, src_desc);
+}
+
+bool LogSoftmaxCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                                 const std::vector<kernel::AddressPtr> & /*workspace*/,
+                                 const std::vector<kernel::AddressPtr> &outputs) {
+  if (inputs.empty() || outputs.empty()) {
+    MS_LOG(EXCEPTION) << "log softmax error input output size!";
+  }
+  SetArgumentHandle(DNNL_ARG_SRC, inputs[0]->addr);
+  SetArgumentHandle(DNNL_ARG_DST, outputs[0]->addr);
+  ExecutePrimitive();
+  return true;
+}
+}  // namespace kernel
+}  // namespace mindspore