diff --git a/.gitignore b/.gitignore index f538a9de..a6ff1921 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,10 @@ node_modules/ .DS_Store npm-debug.log *.orig +*.iml package-lock.json +/en-us/ +/zh-cn/ +/index.html +md_json/ +log diff --git a/build/blog.js b/build/blog.js index 45b4390a..b94e1a4d 100644 --- a/build/blog.js +++ b/build/blog.js @@ -1,6 +1,6 @@ -!function(e){function t(r){if(n[r])return n[r].exports;var a=n[r]={i:r,l:!1,exports:{}};return e[r].call(a.exports,a,a.exports,t),a.l=!0,a.exports}var n={};t.m=e,t.c=n,t.i=function(e){return e},t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=79)}([function(e,t,n){"use strict";function r(e,t,n,r){n&&Object.defineProperty(e,t,{enumerable:n.enumerable,configurable:n.configurable,writable:n.writable,value:n.initializer?n.initializer.call(r):void 0})}function a(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function o(e,t,n,r,a){var o={};return Object.keys(r).forEach(function(e){o[e]=r[e]}),o.enumerable=!!o.enumerable,o.configurable=!!o.configurable,("value"in o||o.initializer)&&(o.writable=!0),o=n.slice().reverse().reduce(function(n,r){return r(e,t,n)||n},o),a&&void 0!==o.initializer&&(o.value=o.initializer?o.initializer.call(a):void 0,o.initializer=void 0),void 0===o.initializer&&(Object.defineProperty(e,t,o),o=null),o}function i(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(33)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 
Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,a;/*! +!function(e){function t(n){if(a[n])return a[n].exports;var o=a[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,t),o.l=!0,o.exports}var a={};t.m=e,t.c=a,t.i=function(e){return e},t.d=function(e,a,n){t.o(e,a)||Object.defineProperty(e,a,{configurable:!1,enumerable:!0,get:n})},t.n=function(e){var a=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(a,"a",a),a},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=59)}([function(e,t){e.exports=React},function(e,t,a){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.throttle=function(e,t){var a=null;return function(){for(var n=arguments.length,o=Array(n),r=0;r1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,a){e.exports=a(13)()},function(e,t){e.exports=ReactDOM},function(e,t,a){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 
Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,a){var n,o;/*! Copyright (c) 2017 Jed Watson. Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ -!function(){"use strict";function n(){for(var e=[],t=0;t1&&void 0!==arguments[1]?arguments[1]:t.key)+arguments[2]})}},{key:"key",get:function(){return this.childDescriptor.key}},{key:"parentNotation",get:function(){return this.parentKlass.constructor.name+"#"+this.parentPropertySignature}},{key:"childNotation",get:function(){return this.childKlass.constructor.name+"#"+this.childPropertySignature}},{key:"parentTopic",get:function(){return this._getTopic(this.parentDescriptor)}},{key:"childTopic",get:function(){return this._getTopic(this.childDescriptor)}},{key:"parentPropertySignature",get:function(){return this._extractTopicSignature(this.parentTopic)}},{key:"childPropertySignature",get:function(){return this._extractTopicSignature(this.childTopic)}}]),h(e,[{key:"assert",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";!0!==e&&this.error("{child} does not properly override {parent}"+t)}},{key:"error",value:function(e){var t=this;throw e=e.replace("{parent}",function(e){return t.parentNotation}).replace("{child}",function(e){return t.childNotation}),new SyntaxError(e)}}]),e}(),g=[function(e){return e.toLowerCase()},function(e){return e.toUpperCase()},function(e){return e+"s"},function(e){return e.slice(0,-1)},function(e){return e.slice(1,e.length)}]},function(e,t,n){"use strict";function r(e,t,r,c){var s=l(c,3),f=s[0],d=void 0===f?null:f,p=s[1],h=void 0!==p&&p,m=s[2],y=void 0===m?u:m;if(!a.__enabled)return a.__warned||(y.warn("console.profile is not supported. 
All @profile decorators are disabled."),a.__warned=!0),r;var g=r.value;if(null===d&&(d=e.constructor.name+"."+t),"function"!=typeof g)throw new SyntaxError("@profile can only be used on functions, not: "+g);return i({},r,{value:function(){var e=Date.now(),t=n.i(o.c)(this);(!0===h&&!t.profileLastRan||!1===h||"number"==typeof h&&e-t.profileLastRan>h||"function"==typeof h&&h.apply(this,arguments))&&(y.profile(d),t.profileLastRan=e);try{return g.apply(this,arguments)}finally{y.profileEnd(d)}}})}function a(){for(var e=arguments.length,t=Array(e),a=0;a1){if(o=e({path:"/"},r.defaults,o),"number"==typeof o.expires){var l=new Date;l.setMilliseconds(l.getMilliseconds()+864e5*o.expires),o.expires=l}o.expires=o.expires?o.expires.toUTCString():"";try{i=JSON.stringify(a),/^[\{\[]/.test(i)&&(a=i)}catch(e){}a=n.write?n.write(a,t):encodeURIComponent(String(a)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var u="";for(var c in o)o[c]&&(u+="; "+c,!0!==o[c]&&(u+="="+o[c]));return document.cookie=t+"="+a+u}t||(i={});for(var s=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d1){if(r=e({path:"/"},n.defaults,r),"number"==typeof r.expires){var l=new Date;l.setMilliseconds(l.getMilliseconds()+864e5*r.expires),r.expires=l}r.expires=r.expires?r.expires.toUTCString():"";try{i=JSON.stringify(o),/^[\{\[]/.test(i)&&(o=i)}catch(e){}o=a.write?a.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var s="";for(var u in r)r[u]&&(s+="; "+u,!0!==r[u]&&(s+="="+r[u]));return document.cookie=t+"="+o+s}t||(i={});for(var c=document.cookie?document.cookie.split("; "):[],d=/(%[0-9A-Z]{2})+/g,f=0;f1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(33)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 
Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,o;/*! +!function(e){function t(r){if(n[r])return n[r].exports;var o=n[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,t),o.l=!0,o.exports}var n={};t.m=e,t.c=n,t.i=function(e){return e},t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=60)}([function(e,t){e.exports=React},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.throttle=function(e,t){var n=null;return function(){for(var r=arguments.length,o=Array(r),i=0;i1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(13)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 
Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,o;/*! Copyright (c) 2017 Jed Watson. Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ -!function(){"use strict";function n(){for(var e=[],t=0;t1&&void 0!==arguments[1]?arguments[1]:t.key)+arguments[2]})}},{key:"key",get:function(){return this.childDescriptor.key}},{key:"parentNotation",get:function(){return this.parentKlass.constructor.name+"#"+this.parentPropertySignature}},{key:"childNotation",get:function(){return this.childKlass.constructor.name+"#"+this.childPropertySignature}},{key:"parentTopic",get:function(){return this._getTopic(this.parentDescriptor)}},{key:"childTopic",get:function(){return this._getTopic(this.childDescriptor)}},{key:"parentPropertySignature",get:function(){return this._extractTopicSignature(this.parentTopic)}},{key:"childPropertySignature",get:function(){return this._extractTopicSignature(this.childTopic)}}]),h(e,[{key:"assert",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";!0!==e&&this.error("{child} does not properly override {parent}"+t)}},{key:"error",value:function(e){var t=this;throw e=e.replace("{parent}",function(e){return t.parentNotation}).replace("{child}",function(e){return t.childNotation}),new SyntaxError(e)}}]),e}(),m=[function(e){return e.toLowerCase()},function(e){return e.toUpperCase()},function(e){return e+"s"},function(e){return e.slice(0,-1)},function(e){return e.slice(1,e.length)}]},function(e,t,n){"use strict";function r(e,t,r,l){var c=u(l,3),f=c[0],p=void 0===f?null:f,d=c[1],h=void 0!==d&&d,y=c[2],v=void 0===y?s:y;if(!o.__enabled)return o.__warned||(v.warn("console.profile is not supported. 
All @profile decorators are disabled."),o.__warned=!0),r;var m=r.value;if(null===p&&(p=e.constructor.name+"."+t),"function"!=typeof m)throw new SyntaxError("@profile can only be used on functions, not: "+m);return a({},r,{value:function(){var e=Date.now(),t=n.i(i.c)(this);(!0===h&&!t.profileLastRan||!1===h||"number"==typeof h&&e-t.profileLastRan>h||"function"==typeof h&&h.apply(this,arguments))&&(v.profile(p),t.profileLastRan=e);try{return m.apply(this,arguments)}finally{v.profileEnd(p)}}})}function o(){for(var e=arguments.length,t=Array(e),o=0;o1){if(i=e({path:"/"},r.defaults,i),"number"==typeof i.expires){var u=new Date;u.setMilliseconds(u.getMilliseconds()+864e5*i.expires),i.expires=u}i.expires=i.expires?i.expires.toUTCString():"";try{a=JSON.stringify(o),/^[\{\[]/.test(a)&&(o=a)}catch(e){}o=n.write?n.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var s="";for(var l in i)i[l]&&(s+="; "+l,!0!==i[l]&&(s+="="+i[l]));return document.cookie=t+"="+o+s}t||(a={});for(var c=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,p=0;p-1?t:e}function d(e,t){t=t||{};var n=t.body;if(e instanceof d){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,n||null==e._bodyInit||(n=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=p(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function h(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var n=e.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");t.append(decodeURIComponent(r),decodeURIComponent(o))}}),t}function y(e){var t=new o;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var n=e.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();t.append(r,o)}}),t}function v(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var m={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(m.arrayBuffer)var g=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],b=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&g.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,r){e=t(e),r=n(r);var o=this.map[e];this.map[e]=o?o+","+r:r},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return 
this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,r){this.map[t(e)]=n(r)},o.prototype.forEach=function(e,t){for(var n in this.map)this.map.hasOwnProperty(n)&&e.call(t,this.map[n],n,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,n){e.push(n)}),r(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),r(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,n){e.push([n,t])}),r(e)},m.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var O=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];d.prototype.clone=function(){return new d(this,{body:this._bodyInit})},f.call(d.prototype),f.call(v.prototype),v.prototype.clone=function(){return new v(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},v.error=function(){var e=new v(null,{status:0,statusText:""});return e.type="error",e};var _=[301,302,303,307,308];v.redirect=function(e,t){if(-1===_.indexOf(t))throw new RangeError("Invalid status code");return new v(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=d,e.Response=v,e.fetch=function(e,t){return new Promise(function(n,r){var o=new d(e,t),i=new XMLHttpRequest;i.onload=function(){var e={status:i.status,statusText:i.statusText,headers:y(i.getAllResponseHeaders()||"")};e.url="responseURL"in i?i.responseURL:e.headers.get("X-Request-URL");var t="response"in i?i.response:i.responseText;n(new v(t,e))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials?i.withCredentials=!0:"omit"===o.credentials&&(i.withCredentials=!1),"responseType"in i&&m.blob&&(i.responseType="blob"),o.headers.forEach(function(e,t){i.setRequestHeader(t,e)}),i.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.addPassiveEventListener=function(e,t,n){var r=function(){var e=!1;try{var t=Object.defineProperty({},"passive",{get:function(){e=!0}});window.addEventListener("test",null,t)}catch(e){}return e}();e.addEventListener(t,n,!!r&&{passive:!0})},t.removePassiveEventListener=function(e,t,n){e.removeEventListener(t,n)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r={registered:{},scrollEvent:{register:function(e,t){r.registered[e]=t},remove:function(e){r.registered[e]=null}}};t.default=r},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var t=1;t=Math.floor(a)&&c=Math.floor(u),d=n.getActiveLink();p&&(o===d&&n.setActiveLink(void 
0),e.props.hashSpy&&w.default.getHash()===o&&w.default.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive(o,i))),!f||d===o&&!1!==e.state.active||(n.setActiveLink(o),e.props.hashSpy&&w.default.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o,i)))}}};return r.propTypes=O,r.defaultProps={offset:0},r}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(48),o=function(e){return e&&e.__esModule?e:{default:e}}(r),i=n(38),a=function(e){return(0,o.default)(e,66)},u={spyCallbacks:[],spySetState:[],scrollSpyContainers:[],mount:function(e){if(e){var t=a(function(t){u.scrollHandler(e)});u.scrollSpyContainers.push(e),(0,i.addPassiveEventListener)(e,"scroll",t)}},isMounted:function(e){return-1!==u.scrollSpyContainers.indexOf(e)},currentPositionY:function(e){if(e===document){var t=void 0!==window.pageXOffset,n="CSS1Compat"===(document.compatMode||"");return t?window.pageYOffset:n?document.documentElement.scrollTop:document.body.scrollTop}return e.scrollTop},scrollHandler:function(e){(u.scrollSpyContainers[u.scrollSpyContainers.indexOf(e)].spyCallbacks||[]).forEach(function(t){return t(u.currentPositionY(e))})},addStateHandler:function(e){u.spySetState.push(e)},addSpyHandler:function(e,t){var n=u.scrollSpyContainers[u.scrollSpyContainers.indexOf(t)];n.spyCallbacks||(n.spyCallbacks=[]),n.spyCallbacks.push(e),e(u.currentPositionY(t))},updateStates:function(){u.spySetState.forEach(function(e){return e()})},unmount:function(e,t){u.scrollSpyContainers.forEach(function(e){return e.spyCallbacks&&e.spyCallbacks.length&&e.spyCallbacks.splice(e.spyCallbacks.indexOf(t),1)}),u.spySetState&&u.spySetState.length&&u.spySetState.splice(u.spySetState.indexOf(e),1),document.removeEventListener("scroll",u.scrollHandler)},update:function(){return u.scrollSpyContainers.forEach(function(e){return u.scrollHandler(e)})}};t.default=u},,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var o=Object.assign||function(e){for(var t=1;t=o.duration?1:t(o.progress/o.duration),o.currentPositionY=o.startPositionY+Math.ceil(o.deltaTop*o.percent),o.containerElement&&o.containerElement!==document&&o.containerElement!==document.body?o.containerElement.scrollTop=o.currentPositionY:window.scrollTo(0,o.currentPositionY),o.percent<1){var i=e.bind(null,t,n);return void y.call(window,i)}f.default.registered.end&&f.default.registered.end(o.to,o.target,o.currentPositionY)},w=function(e){e.data.containerElement=e?e.containerId?document.getElementById(e.containerId):e.container&&e.container.nodeType?e.container:document:null},O=function(e,t,n,r){if(t.data=t.data||v(),window.clearTimeout(t.data.delayTimeout),l.default.subscribe(function(){t.data.cancel=!0}),w(t),t.data.start=null,t.data.cancel=!1,t.data.startPositionY=m(t),t.data.targetPositionY=t.absolute?e:e+t.data.startPositionY,t.data.startPositionY===t.data.targetPositionY)return void(f.default.registered.end&&f.default.registered.end(t.data.to,t.data.target,t.data.currentPositionY));t.data.deltaTop=Math.round(t.data.targetPositionY-t.data.startPositionY),t.data.duration=d(t.duration)(t.data.deltaTop),t.data.duration=isNaN(parseFloat(t.data.duration))?1e3:parseFloat(t.data.duration),t.data.to=n,t.data.target=r;var o=p(t),i=b.bind(null,o,t);if(t&&t.delay>0)return void(t.data.delayTimeout=window.setTimeout(function(){y.call(window,i)},t.delay));y.call(window,i)},_=function(e){return 
e=o({},e),e.data=e.data||v(),e.absolute=!0,e},S=function(e){O(0,_(e))},E=function(e,t){O(e,_(t))},j=function(e){e=_(e),w(e),O(g(e),e)},P=function(e,t){t=_(t),w(t),O(m(t)+e,t)};t.default={animateTopScroll:O,getAnimationType:p,scrollToTop:S,scrollToBottom:j,scrollTo:E,scrollMore:P}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var t=1;t=0;r--){var o=e[r];"."===o?e.splice(r,1):".."===o?(e.splice(r,1),n++):n&&(e.splice(r,1),n--)}if(t)for(;n--;n)e.unshift("..");return e}function r(e,t){if(e.filter)return e.filter(t);for(var n=[],r=0;r=-1&&!o;i--){var a=i>=0?arguments[i]:e.cwd();if("string"!=typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(t=a+"/"+t,o="/"===a.charAt(0))}return t=n(r(t.split("/"),function(e){return!!e}),!o).join("/"),(o?"/":"")+t||"."},t.normalize=function(e){var o=t.isAbsolute(e),i="/"===a(e,-1);return e=n(r(e.split("/"),function(e){return!!e}),!o).join("/"),e||o||(e="."),e&&i&&(e+="/"),(o?"/":"")+e},t.isAbsolute=function(e){return"/"===e.charAt(0)},t.join=function(){var e=Array.prototype.slice.call(arguments,0);return t.normalize(r(e,function(e,t){if("string"!=typeof e)throw new TypeError("Arguments to path.join must be strings");return e}).join("/"))},t.relative=function(e,n){function r(e){for(var t=0;t=0&&""===e[n];n--);return t>n?[]:e.slice(t,n-t+1)}e=t.resolve(e).substr(1),n=t.resolve(n).substr(1);for(var o=r(e.split("/")),i=r(n.split("/")),a=Math.min(o.length,i.length),u=a,s=0;s=t||n<0||P&&r>=m}function c(){var e=S();if(l(e))return f(e);b=setTimeout(c,a(e))}function f(e){return b=void 0,T&&y?r(e):(y=v=void 0,g)}function p(){void 0!==b&&clearTimeout(b),E=0,y=w=v=b=void 0}function d(){return void 0===b?g:f(S())}function h(){var e=S(),n=l(e);if(y=arguments,v=this,w=e,n){if(void 0===b)return i(w);if(P)return b=setTimeout(c,t),r(w)}return void 0===b&&(b=setTimeout(c,t)),g}var y,v,m,g,b,w,E=0,j=!1,P=!1,T=!0;if("function"!=typeof e)throw new TypeError(s);return t=u(t)||0,o(n)&&(j=!!n.leading,P="maxWait"in n,m=P?O(u(n.maxWait)||0,t):m,T="trailing"in n?!!n.trailing:T),h.cancel=p,h.flush=d,h}function r(e,t,r){var i=!0,a=!0;if("function"!=typeof e)throw new TypeError(s);return o(r)&&(i="leading"in r?!!r.leading:i,a="trailing"in r?!!r.trailing:a),n(e,t,{leading:i,maxWait:t,trailing:a})}function o(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function i(e){return!!e&&"object"==typeof e}function a(e){return"symbol"==typeof e||i(e)&&w.call(e)==c}function u(e){if("number"==typeof e)return e;if(a(e))return l;if(o(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=o(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(f,"");var n=d.test(e);return n||h.test(e)?y(e.slice(2),n?2:8):p.test(e)?l:+e}var s="Expected a function",l=NaN,c="[object Symbol]",f=/^\s+|\s+$/g,p=/^[-+]0x[0-9a-f]+$/i,d=/^0b[01]+$/i,h=/^0o[0-7]+$/i,y=parseInt,v="object"==typeof 
t&&t&&t.Object===Object&&t,m="object"==typeof self&&self&&self.Object===Object&&self,g=v||m||Function("return this")(),b=Object.prototype,w=b.toString,O=Math.max,_=Math.min,S=function(){return g.Date.now()};e.exports=r}).call(t,n(56))},function(e,t){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(c===setTimeout)return setTimeout(e,0);if((c===n||!c)&&setTimeout)return c=setTimeout,setTimeout(e,0);try{return c(e,0)}catch(t){try{return c.call(null,e,0)}catch(t){return c.call(this,e,0)}}}function i(e){if(f===clearTimeout)return clearTimeout(e);if((f===r||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(e);try{return f(e)}catch(t){try{return f.call(null,e)}catch(t){return f.call(this,e)}}}function a(){y&&d&&(y=!1,d.length?h=d.concat(h):v=-1,h.length&&u())}function u(){if(!y){var e=o(a);y=!0;for(var t=h.length;t;){for(d=h,h=[];++v1)for(var n=1;n=Math.floor(a)&&f=Math.floor(u),y=n.getActiveLink();return h?(o===y&&n.setActiveLink(void 0),e.props.hashSpy&&p.getHash()===o&&p.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive()),l.updateStates()):d&&y!==o?(n.setActiveLink(o),e.props.hashSpy&&p.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o)),l.updateStates()):void 0}}};return f.propTypes=d,f.defaultProps={offset:0},f},Element:function(e){console.warn("Helpers.Element is deprecated since v1.7.0");var t=function(t){function n(e){r(this,n);var t=o(this,(n.__proto__||Object.getPrototypeOf(n)).call(this,e));return t.childBindings={domNode:null},t}return i(n,t),u(n,[{key:"componentDidMount",value:function(){if("undefined"==typeof window)return!1;this.registerElems(this.props.name)}},{key:"componentWillReceiveProps",value:function(e){this.props.name!==e.name&&this.registerElems(e.name)}},{key:"componentWillUnmount",value:function(){if("undefined"==typeof window)return!1;c.unregister(this.props.name)}},{key:"registerElems",value:function(e){c.register(e,this.childBindings.domNode)}},{key:"render",value:function(){return s.createElement(e,a({},this.props,{parentBindings:this.childBindings}))}}]),n}(s.Component);return t.propTypes={name:f.string,id:f.string},t}};e.exports=h},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(38),o=["mousedown","mousewheel","touchmove","keydown"];t.default={subscribe:function(e){return"undefined"!=typeof document&&o.forEach(function(t){return(0,r.addPassiveEventListener)(document,t,e)})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={defaultEasing:function(e){return e<.5?Math.pow(2*e,2)/2:1-Math.pow(2*(1-e),2)/2},linear:function(e){return e},easeInQuad:function(e){return e*e},easeOutQuad:function(e){return e*(2-e)},easeInOutQuad:function(e){return e<.5?2*e*e:(4-2*e)*e-1},easeInCubic:function(e){return e*e*e},easeOutCubic:function(e){return--e*e*e+1},easeInOutCubic:function(e){return e<.5?4*e*e*e:(e-1)*(2*e-2)*(2*e-2)+1},easeInQuart:function(e){return e*e*e*e},easeOutQuart:function(e){return 1- --e*e*e*e},easeInOutQuart:function(e){return e<.5?8*e*e*e*e:1-8*--e*e*e*e},easeInQuint:function(e){return e*e*e*e*e},easeOutQuint:function(e){return 1+--e*e*e*e*e},easeInOutQuint:function(e){return e<.5?16*e*e*e*e*e:1+16*--e*e*e*e*e}}},function(e,t){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof 
window&&(n=window)}e.exports=n},,,,,,,,,,,,,,,,,function(e,t){},,,,,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=function(){function e(e,t){for(var n=0;n1){if(i=e({path:"/"},r.defaults,i),"number"==typeof i.expires){var u=new Date;u.setMilliseconds(u.getMilliseconds()+864e5*i.expires),i.expires=u}i.expires=i.expires?i.expires.toUTCString():"";try{a=JSON.stringify(o),/^[\{\[]/.test(a)&&(o=a)}catch(e){}o=n.write?n.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var s="";for(var l in i)i[l]&&(s+="; "+l,!0!==i[l]&&(s+="="+i[l]));return document.cookie=t+"="+o+s}t||(a={});for(var c=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d-1?t:e}function p(e,t){t=t||{};var n=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,n||null==e._bodyInit||(n=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function h(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var n=e.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");t.append(decodeURIComponent(r),decodeURIComponent(o))}}),t}function y(e){var t=new o;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var n=e.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();t.append(r,o)}}),t}function m(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var g={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(g.arrayBuffer)var v=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],b=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&v.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,r){e=t(e),r=n(r);var 
o=this.map[e];this.map[e]=o?o+","+r:r},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,r){this.map[t(e)]=n(r)},o.prototype.forEach=function(e,t){for(var n in this.map)this.map.hasOwnProperty(n)&&e.call(t,this.map[n],n,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,n){e.push(n)}),r(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),r(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,n){e.push([n,t])}),r(e)},g.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var _=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},f.call(p.prototype),f.call(m.prototype),m.prototype.clone=function(){return new m(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},m.error=function(){var e=new m(null,{status:0,statusText:""});return e.type="error",e};var O=[301,302,303,307,308];m.redirect=function(e,t){if(-1===O.indexOf(t))throw new RangeError("Invalid status code");return new m(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=m,e.fetch=function(e,t){return new Promise(function(n,r){var o=new p(e,t),i=new XMLHttpRequest;i.onload=function(){var e={status:i.status,statusText:i.statusText,headers:y(i.getAllResponseHeaders()||"")};e.url="responseURL"in i?i.responseURL:e.headers.get("X-Request-URL");var t="response"in i?i.response:i.responseText;n(new m(t,e))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials?i.withCredentials=!0:"omit"===o.credentials&&(i.withCredentials=!1),"responseType"in i&&g.blob&&(i.responseType="blob"),o.headers.forEach(function(e,t){i.setRequestHeader(t,e)}),i.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.addPassiveEventListener=function(e,t,n){var r=function(){var e=!1;try{var t=Object.defineProperty({},"passive",{get:function(){e=!0}});window.addEventListener("test",null,t)}catch(e){}return e}();e.addEventListener(t,n,!!r&&{passive:!0})},t.removePassiveEventListener=function(e,t,n){e.removeEventListener(t,n)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r={registered:{},scrollEvent:{register:function(e,t){r.registered[e]=t},remove:function(e){r.registered[e]=null}}};t.default=r},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var 
t=1;t=Math.floor(a)&&c=Math.floor(u),p=n.getActiveLink();d&&(o===p&&n.setActiveLink(void 0),e.props.hashSpy&&w.default.getHash()===o&&w.default.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive(o,i))),!f||p===o&&!1!==e.state.active||(n.setActiveLink(o),e.props.hashSpy&&w.default.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o,i)))}}};return r.propTypes=_,r.defaultProps={offset:0},r}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(28),o=function(e){return e&&e.__esModule?e:{default:e}}(r),i=n(18),a=function(e){return(0,o.default)(e,66)},u={spyCallbacks:[],spySetState:[],scrollSpyContainers:[],mount:function(e){if(e){var t=a(function(t){u.scrollHandler(e)});u.scrollSpyContainers.push(e),(0,i.addPassiveEventListener)(e,"scroll",t)}},isMounted:function(e){return-1!==u.scrollSpyContainers.indexOf(e)},currentPositionY:function(e){if(e===document){var t=void 0!==window.pageXOffset,n="CSS1Compat"===(document.compatMode||"");return t?window.pageYOffset:n?document.documentElement.scrollTop:document.body.scrollTop}return e.scrollTop},scrollHandler:function(e){(u.scrollSpyContainers[u.scrollSpyContainers.indexOf(e)].spyCallbacks||[]).forEach(function(t){return t(u.currentPositionY(e))})},addStateHandler:function(e){u.spySetState.push(e)},addSpyHandler:function(e,t){var n=u.scrollSpyContainers[u.scrollSpyContainers.indexOf(t)];n.spyCallbacks||(n.spyCallbacks=[]),n.spyCallbacks.push(e),e(u.currentPositionY(t))},updateStates:function(){u.spySetState.forEach(function(e){return e()})},unmount:function(e,t){u.scrollSpyContainers.forEach(function(e){return e.spyCallbacks&&e.spyCallbacks.length&&e.spyCallbacks.splice(e.spyCallbacks.indexOf(t),1)}),u.spySetState&&u.spySetState.length&&u.spySetState.splice(u.spySetState.indexOf(e),1),document.removeEventListener("scroll",u.scrollHandler)},update:function(){return u.scrollSpyContainers.forEach(function(e){return u.scrollHandler(e)})}};t.default=u},,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var o=Object.assign||function(e){for(var t=1;t=o.duration?1:t(o.progress/o.duration),o.currentPositionY=o.startPositionY+Math.ceil(o.deltaTop*o.percent),o.containerElement&&o.containerElement!==document&&o.containerElement!==document.body?o.containerElement.scrollTop=o.currentPositionY:window.scrollTo(0,o.currentPositionY),o.percent<1){var i=e.bind(null,t,n);return void y.call(window,i)}f.default.registered.end&&f.default.registered.end(o.to,o.target,o.currentPositionY)},w=function(e){e.data.containerElement=e?e.containerId?document.getElementById(e.containerId):e.container&&e.container.nodeType?e.container:document:null},_=function(e,t,n,r){if(t.data=t.data||m(),window.clearTimeout(t.data.delayTimeout),l.default.subscribe(function(){t.data.cancel=!0}),w(t),t.data.start=null,t.data.cancel=!1,t.data.startPositionY=g(t),t.data.targetPositionY=t.absolute?e:e+t.data.startPositionY,t.data.startPositionY===t.data.targetPositionY)return void(f.default.registered.end&&f.default.registered.end(t.data.to,t.data.target,t.data.currentPositionY));t.data.deltaTop=Math.round(t.data.targetPositionY-t.data.startPositionY),t.data.duration=p(t.duration)(t.data.deltaTop),t.data.duration=isNaN(parseFloat(t.data.duration))?1e3:parseFloat(t.data.duration),t.data.to=n,t.data.target=r;var o=d(t),i=b.bind(null,o,t);if(t&&t.delay>0)return 
void(t.data.delayTimeout=window.setTimeout(function(){y.call(window,i)},t.delay));y.call(window,i)},O=function(e){return e=o({},e),e.data=e.data||m(),e.absolute=!0,e},E=function(e){_(0,O(e))},S=function(e,t){_(e,O(t))},k=function(e){e=O(e),w(e),_(v(e),e)},T=function(e,t){t=O(t),w(t),_(g(t)+e,t)};t.default={animateTopScroll:_,getAnimationType:d,scrollToTop:E,scrollToBottom:k,scrollTo:S,scrollMore:T}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var t=1;t=0;r--){var o=e[r];"."===o?e.splice(r,1):".."===o?(e.splice(r,1),n++):n&&(e.splice(r,1),n--)}if(t)for(;n--;n)e.unshift("..");return e}function r(e,t){if(e.filter)return e.filter(t);for(var n=[],r=0;r=-1&&!o;i--){var a=i>=0?arguments[i]:e.cwd();if("string"!=typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(t=a+"/"+t,o="/"===a.charAt(0))}return t=n(r(t.split("/"),function(e){return!!e}),!o).join("/"),(o?"/":"")+t||"."},t.normalize=function(e){var o=t.isAbsolute(e),i="/"===a(e,-1);return e=n(r(e.split("/"),function(e){return!!e}),!o).join("/"),e||o||(e="."),e&&i&&(e+="/"),(o?"/":"")+e},t.isAbsolute=function(e){return"/"===e.charAt(0)},t.join=function(){var e=Array.prototype.slice.call(arguments,0);return t.normalize(r(e,function(e,t){if("string"!=typeof e)throw new TypeError("Arguments to path.join must be strings");return e}).join("/"))},t.relative=function(e,n){function r(e){for(var t=0;t=0&&""===e[n];n--);return t>n?[]:e.slice(t,n-t+1)}e=t.resolve(e).substr(1),n=t.resolve(n).substr(1);for(var o=r(e.split("/")),i=r(n.split("/")),a=Math.min(o.length,i.length),u=a,s=0;s=t||n<0||T&&r>=g}function c(){var e=E();if(l(e))return f(e);b=setTimeout(c,a(e))}function f(e){return b=void 0,P&&y?r(e):(y=m=void 0,v)}function d(){void 0!==b&&clearTimeout(b),S=0,y=w=m=b=void 0}function p(){return void 0===b?v:f(E())}function h(){var e=E(),n=l(e);if(y=arguments,m=this,w=e,n){if(void 0===b)return i(w);if(T)return b=setTimeout(c,t),r(w)}return void 0===b&&(b=setTimeout(c,t)),v}var y,m,g,v,b,w,S=0,k=!1,T=!1,P=!0;if("function"!=typeof e)throw new TypeError(s);return t=u(t)||0,o(n)&&(k=!!n.leading,T="maxWait"in n,g=T?_(u(n.maxWait)||0,t):g,P="trailing"in n?!!n.trailing:P),h.cancel=d,h.flush=p,h}function r(e,t,r){var i=!0,a=!0;if("function"!=typeof e)throw new TypeError(s);return o(r)&&(i="leading"in r?!!r.leading:i,a="trailing"in r?!!r.trailing:a),n(e,t,{leading:i,maxWait:t,trailing:a})}function o(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function i(e){return!!e&&"object"==typeof e}function a(e){return"symbol"==typeof e||i(e)&&w.call(e)==c}function u(e){if("number"==typeof e)return e;if(a(e))return l;if(o(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=o(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(f,"");var n=p.test(e);return n||h.test(e)?y(e.slice(2),n?2:8):d.test(e)?l:+e}var s="Expected a function",l=NaN,c="[object 
Symbol]",f=/^\s+|\s+$/g,d=/^[-+]0x[0-9a-f]+$/i,p=/^0b[01]+$/i,h=/^0o[0-7]+$/i,y=parseInt,m="object"==typeof t&&t&&t.Object===Object&&t,g="object"==typeof self&&self&&self.Object===Object&&self,v=m||g||Function("return this")(),b=Object.prototype,w=b.toString,_=Math.max,O=Math.min,E=function(){return v.Date.now()};e.exports=r}).call(t,n(36))},function(e,t){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(c===setTimeout)return setTimeout(e,0);if((c===n||!c)&&setTimeout)return c=setTimeout,setTimeout(e,0);try{return c(e,0)}catch(t){try{return c.call(null,e,0)}catch(t){return c.call(this,e,0)}}}function i(e){if(f===clearTimeout)return clearTimeout(e);if((f===r||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(e);try{return f(e)}catch(t){try{return f.call(null,e)}catch(t){return f.call(this,e)}}}function a(){y&&p&&(y=!1,p.length?h=p.concat(h):m=-1,h.length&&u())}function u(){if(!y){var e=o(a);y=!0;for(var t=h.length;t;){for(p=h,h=[];++m1)for(var n=1;n=Math.floor(a)&&f=Math.floor(u),y=n.getActiveLink();return h?(o===y&&n.setActiveLink(void 0),e.props.hashSpy&&d.getHash()===o&&d.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive()),l.updateStates()):p&&y!==o?(n.setActiveLink(o),e.props.hashSpy&&d.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o)),l.updateStates()):void 0}}};return f.propTypes=p,f.defaultProps={offset:0},f},Element:function(e){console.warn("Helpers.Element is deprecated since v1.7.0");var t=function(t){function n(e){r(this,n);var t=o(this,(n.__proto__||Object.getPrototypeOf(n)).call(this,e));return t.childBindings={domNode:null},t}return i(n,t),u(n,[{key:"componentDidMount",value:function(){if("undefined"==typeof window)return!1;this.registerElems(this.props.name)}},{key:"componentWillReceiveProps",value:function(e){this.props.name!==e.name&&this.registerElems(e.name)}},{key:"componentWillUnmount",value:function(){if("undefined"==typeof window)return!1;c.unregister(this.props.name)}},{key:"registerElems",value:function(e){c.register(e,this.childBindings.domNode)}},{key:"render",value:function(){return s.createElement(e,a({},this.props,{parentBindings:this.childBindings}))}}]),n}(s.Component);return t.propTypes={name:f.string,id:f.string},t}};e.exports=h},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(18),o=["mousedown","mousewheel","touchmove","keydown"];t.default={subscribe:function(e){return"undefined"!=typeof document&&o.forEach(function(t){return(0,r.addPassiveEventListener)(document,t,e)})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={defaultEasing:function(e){return e<.5?Math.pow(2*e,2)/2:1-Math.pow(2*(1-e),2)/2},linear:function(e){return e},easeInQuad:function(e){return e*e},easeOutQuad:function(e){return e*(2-e)},easeInOutQuad:function(e){return e<.5?2*e*e:(4-2*e)*e-1},easeInCubic:function(e){return e*e*e},easeOutCubic:function(e){return--e*e*e+1},easeInOutCubic:function(e){return e<.5?4*e*e*e:(e-1)*(2*e-2)*(2*e-2)+1},easeInQuart:function(e){return e*e*e*e},easeOutQuart:function(e){return 1- --e*e*e*e},easeInOutQuart:function(e){return e<.5?8*e*e*e*e:1-8*--e*e*e*e},easeInQuint:function(e){return e*e*e*e*e},easeOutQuint:function(e){return 1+--e*e*e*e*e},easeInOutQuint:function(e){return e<.5?16*e*e*e*e*e:1+16*--e*e*e*e*e}}},function(e,t){var n;n=function(){return 
this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof window&&(n=window)}e.exports=n},,,,,,,,,,,,,,,,,function(e,t){},,,,,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=function(){function e(e,t){for(var n=0;n1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(33)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,o;/*! 
+!function(e){function t(r){if(n[r])return n[r].exports;var a=n[r]={i:r,l:!1,exports:{}};return e[r].call(a.exports,a,a.exports,t),a.l=!0,a.exports}var n={};t.m=e,t.c=n,t.i=function(e){return e},t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=61)}([function(e,t){e.exports=React},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.throttle=function(e,t){var n=null;return function(){for(var r=arguments.length,a=Array(r),o=0;o1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(13)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,a;/*! Copyright (c) 2017 Jed Watson. 
Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ -!function(){"use strict";function n(){for(var e=[],t=0;t1&&void 0!==arguments[1]?arguments[1]:t.key)+arguments[2]})}},{key:"key",get:function(){return this.childDescriptor.key}},{key:"parentNotation",get:function(){return this.parentKlass.constructor.name+"#"+this.parentPropertySignature}},{key:"childNotation",get:function(){return this.childKlass.constructor.name+"#"+this.childPropertySignature}},{key:"parentTopic",get:function(){return this._getTopic(this.parentDescriptor)}},{key:"childTopic",get:function(){return this._getTopic(this.childDescriptor)}},{key:"parentPropertySignature",get:function(){return this._extractTopicSignature(this.parentTopic)}},{key:"childPropertySignature",get:function(){return this._extractTopicSignature(this.childTopic)}}]),h(e,[{key:"assert",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";!0!==e&&this.error("{child} does not properly override {parent}"+t)}},{key:"error",value:function(e){var t=this;throw e=e.replace("{parent}",function(e){return t.parentNotation}).replace("{child}",function(e){return t.childNotation}),new SyntaxError(e)}}]),e}(),y=[function(e){return e.toLowerCase()},function(e){return e.toUpperCase()},function(e){return e+"s"},function(e){return e.slice(0,-1)},function(e){return e.slice(1,e.length)}]},function(e,t,n){"use strict";function r(e,t,r,c){var s=u(c,3),f=s[0],d=void 0===f?null:f,p=s[1],h=void 0!==p&&p,m=s[2],g=void 0===m?l:m;if(!o.__enabled)return o.__warned||(g.warn("console.profile is not supported. All @profile decorators are disabled."),o.__warned=!0),r;var y=r.value;if(null===d&&(d=e.constructor.name+"."+t),"function"!=typeof y)throw new SyntaxError("@profile can only be used on functions, not: "+y);return a({},r,{value:function(){var e=Date.now(),t=n.i(i.c)(this);(!0===h&&!t.profileLastRan||!1===h||"number"==typeof h&&e-t.profileLastRan>h||"function"==typeof h&&h.apply(this,arguments))&&(g.profile(d),t.profileLastRan=e);try{return y.apply(this,arguments)}finally{g.profileEnd(d)}}})}function o(){for(var e=arguments.length,t=Array(e),o=0;o1){if(i=e({path:"/"},r.defaults,i),"number"==typeof i.expires){var u=new Date;u.setMilliseconds(u.getMilliseconds()+864e5*i.expires),i.expires=u}i.expires=i.expires?i.expires.toUTCString():"";try{a=JSON.stringify(o),/^[\{\[]/.test(a)&&(o=a)}catch(e){}o=n.write?n.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var l="";for(var c in i)i[c]&&(l+="; "+c,!0!==i[c]&&(l+="="+i[c]));return document.cookie=t+"="+o+l}t||(a={});for(var s=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d1){if(o=e({path:"/"},r.defaults,o),"number"==typeof o.expires){var l=new Date;l.setMilliseconds(l.getMilliseconds()+864e5*o.expires),o.expires=l}o.expires=o.expires?o.expires.toUTCString():"";try{i=JSON.stringify(a),/^[\{\[]/.test(i)&&(a=i)}catch(e){}a=n.write?n.write(a,t):encodeURIComponent(String(a)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var u="";for(var c in o)o[c]&&(u+="; "+c,!0!==o[c]&&(u+="="+o[c]));return document.cookie=t+"="+a+u}t||(i={});for(var s=document.cookie?document.cookie.split("; 
"):[],f=/(%[0-9A-Z]{2})+/g,d=0;d1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(33)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,o;/*! 
+!function(e){function t(r){if(n[r])return n[r].exports;var o=n[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,t),o.l=!0,o.exports}var n={};t.m=e,t.c=n,t.i=function(e){return e},t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=62)}([function(e,t){e.exports=React},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.throttle=function(e,t){var n=null;return function(){for(var r=arguments.length,o=Array(r),i=0;i1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(13)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,o;/*! Copyright (c) 2017 Jed Watson. 
Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ -!function(){"use strict";function n(){for(var e=[],t=0;t1&&void 0!==arguments[1]?arguments[1]:t.key)+arguments[2]})}},{key:"key",get:function(){return this.childDescriptor.key}},{key:"parentNotation",get:function(){return this.parentKlass.constructor.name+"#"+this.parentPropertySignature}},{key:"childNotation",get:function(){return this.childKlass.constructor.name+"#"+this.childPropertySignature}},{key:"parentTopic",get:function(){return this._getTopic(this.parentDescriptor)}},{key:"childTopic",get:function(){return this._getTopic(this.childDescriptor)}},{key:"parentPropertySignature",get:function(){return this._extractTopicSignature(this.parentTopic)}},{key:"childPropertySignature",get:function(){return this._extractTopicSignature(this.childTopic)}}]),h(e,[{key:"assert",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";!0!==e&&this.error("{child} does not properly override {parent}"+t)}},{key:"error",value:function(e){var t=this;throw e=e.replace("{parent}",function(e){return t.parentNotation}).replace("{child}",function(e){return t.childNotation}),new SyntaxError(e)}}]),e}(),v=[function(e){return e.toLowerCase()},function(e){return e.toUpperCase()},function(e){return e+"s"},function(e){return e.slice(0,-1)},function(e){return e.slice(1,e.length)}]},function(e,t,n){"use strict";function r(e,t,r,s){var c=u(s,3),f=c[0],d=void 0===f?null:f,p=c[1],h=void 0!==p&&p,y=c[2],m=void 0===y?l:y;if(!o.__enabled)return o.__warned||(m.warn("console.profile is not supported. All @profile decorators are disabled."),o.__warned=!0),r;var v=r.value;if(null===d&&(d=e.constructor.name+"."+t),"function"!=typeof v)throw new SyntaxError("@profile can only be used on functions, not: "+v);return a({},r,{value:function(){var e=Date.now(),t=n.i(i.c)(this);(!0===h&&!t.profileLastRan||!1===h||"number"==typeof h&&e-t.profileLastRan>h||"function"==typeof h&&h.apply(this,arguments))&&(m.profile(d),t.profileLastRan=e);try{return v.apply(this,arguments)}finally{m.profileEnd(d)}}})}function o(){for(var e=arguments.length,t=Array(e),o=0;o1){if(i=e({path:"/"},r.defaults,i),"number"==typeof i.expires){var u=new Date;u.setMilliseconds(u.getMilliseconds()+864e5*i.expires),i.expires=u}i.expires=i.expires?i.expires.toUTCString():"";try{a=JSON.stringify(o),/^[\{\[]/.test(a)&&(o=a)}catch(e){}o=n.write?n.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var l="";for(var s in i)i[s]&&(l+="; "+s,!0!==i[s]&&(l+="="+i[s]));return document.cookie=t+"="+o+l}t||(a={});for(var c=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d-1?t:e}function p(e,t){t=t||{};var n=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,n||null==e._bodyInit||(n=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function h(e){var t=new 
FormData;return e.trim().split("&").forEach(function(e){if(e){var n=e.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");t.append(decodeURIComponent(r),decodeURIComponent(o))}}),t}function y(e){var t=new o;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var n=e.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();t.append(r,o)}}),t}function m(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var v={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(v.arrayBuffer)var g=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],b=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&g.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,r){e=t(e),r=n(r);var o=this.map[e];this.map[e]=o?o+","+r:r},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,r){this.map[t(e)]=n(r)},o.prototype.forEach=function(e,t){for(var n in this.map)this.map.hasOwnProperty(n)&&e.call(t,this.map[n],n,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,n){e.push(n)}),r(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),r(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,n){e.push([n,t])}),r(e)},v.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var _=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},f.call(p.prototype),f.call(m.prototype),m.prototype.clone=function(){return new m(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},m.error=function(){var e=new m(null,{status:0,statusText:""});return e.type="error",e};var O=[301,302,303,307,308];m.redirect=function(e,t){if(-1===O.indexOf(t))throw new RangeError("Invalid status code");return new m(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=m,e.fetch=function(e,t){return new Promise(function(n,r){var o=new p(e,t),i=new XMLHttpRequest;i.onload=function(){var e={status:i.status,statusText:i.statusText,headers:y(i.getAllResponseHeaders()||"")};e.url="responseURL"in i?i.responseURL:e.headers.get("X-Request-URL");var t="response"in i?i.response:i.responseText;n(new m(t,e))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials?i.withCredentials=!0:"omit"===o.credentials&&(i.withCredentials=!1),"responseType"in i&&v.blob&&(i.responseType="blob"),o.headers.forEach(function(e,t){i.setRequestHeader(t,e)}),i.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},function(e,t,n){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0});t.addPassiveEventListener=function(e,t,n){var r=function(){var e=!1;try{var t=Object.defineProperty({},"passive",{get:function(){e=!0}});window.addEventListener("test",null,t)}catch(e){}return e}();e.addEventListener(t,n,!!r&&{passive:!0})},t.removePassiveEventListener=function(e,t,n){e.removeEventListener(t,n)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r={registered:{},scrollEvent:{register:function(e,t){r.registered[e]=t},remove:function(e){r.registered[e]=null}}};t.default=r},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var t=1;t=Math.floor(a)&&c=Math.floor(u),p=n.getActiveLink();d&&(o===p&&n.setActiveLink(void 0),e.props.hashSpy&&w.default.getHash()===o&&w.default.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive(o,i))),!f||p===o&&!1!==e.state.active||(n.setActiveLink(o),e.props.hashSpy&&w.default.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o,i)))}}};return r.propTypes=_,r.defaultProps={offset:0},r}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(48),o=function(e){return e&&e.__esModule?e:{default:e}}(r),i=n(38),a=function(e){return(0,o.default)(e,66)},u={spyCallbacks:[],spySetState:[],scrollSpyContainers:[],mount:function(e){if(e){var t=a(function(t){u.scrollHandler(e)});u.scrollSpyContainers.push(e),(0,i.addPassiveEventListener)(e,"scroll",t)}},isMounted:function(e){return-1!==u.scrollSpyContainers.indexOf(e)},currentPositionY:function(e){if(e===document){var t=void 0!==window.pageXOffset,n="CSS1Compat"===(document.compatMode||"");return t?window.pageYOffset:n?document.documentElement.scrollTop:document.body.scrollTop}return e.scrollTop},scrollHandler:function(e){(u.scrollSpyContainers[u.scrollSpyContainers.indexOf(e)].spyCallbacks||[]).forEach(function(t){return t(u.currentPositionY(e))})},addStateHandler:function(e){u.spySetState.push(e)},addSpyHandler:function(e,t){var n=u.scrollSpyContainers[u.scrollSpyContainers.indexOf(t)];n.spyCallbacks||(n.spyCallbacks=[]),n.spyCallbacks.push(e),e(u.currentPositionY(t))},updateStates:function(){u.spySetState.forEach(function(e){return e()})},unmount:function(e,t){u.scrollSpyContainers.forEach(function(e){return e.spyCallbacks&&e.spyCallbacks.length&&e.spyCallbacks.splice(e.spyCallbacks.indexOf(t),1)}),u.spySetState&&u.spySetState.length&&u.spySetState.splice(u.spySetState.indexOf(e),1),document.removeEventListener("scroll",u.scrollHandler)},update:function(){return u.scrollSpyContainers.forEach(function(e){return u.scrollHandler(e)})}};t.default=u},function(e,t){},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var 
o=Object.assign||function(e){for(var t=1;t=o.duration?1:t(o.progress/o.duration),o.currentPositionY=o.startPositionY+Math.ceil(o.deltaTop*o.percent),o.containerElement&&o.containerElement!==document&&o.containerElement!==document.body?o.containerElement.scrollTop=o.currentPositionY:window.scrollTo(0,o.currentPositionY),o.percent<1){var i=e.bind(null,t,n);return void y.call(window,i)}f.default.registered.end&&f.default.registered.end(o.to,o.target,o.currentPositionY)},w=function(e){e.data.containerElement=e?e.containerId?document.getElementById(e.containerId):e.container&&e.container.nodeType?e.container:document:null},_=function(e,t,n,r){if(t.data=t.data||m(),window.clearTimeout(t.data.delayTimeout),s.default.subscribe(function(){t.data.cancel=!0}),w(t),t.data.start=null,t.data.cancel=!1,t.data.startPositionY=v(t),t.data.targetPositionY=t.absolute?e:e+t.data.startPositionY,t.data.startPositionY===t.data.targetPositionY)return void(f.default.registered.end&&f.default.registered.end(t.data.to,t.data.target,t.data.currentPositionY));t.data.deltaTop=Math.round(t.data.targetPositionY-t.data.startPositionY),t.data.duration=p(t.duration)(t.data.deltaTop),t.data.duration=isNaN(parseFloat(t.data.duration))?1e3:parseFloat(t.data.duration),t.data.to=n,t.data.target=r;var o=d(t),i=b.bind(null,o,t);if(t&&t.delay>0)return void(t.data.delayTimeout=window.setTimeout(function(){y.call(window,i)},t.delay));y.call(window,i)},O=function(e){return e=o({},e),e.data=e.data||m(),e.absolute=!0,e},k=function(e){_(0,O(e))},E=function(e,t){_(e,O(t))},S=function(e){e=O(e),w(e),_(g(e),e)},j=function(e,t){t=O(t),w(t),_(v(t)+e,t)};t.default={animateTopScroll:_,getAnimationType:d,scrollToTop:k,scrollToBottom:S,scrollTo:E,scrollMore:j}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=Object.assign||function(e){for(var t=1;t=0;r--){var o=e[r];"."===o?e.splice(r,1):".."===o?(e.splice(r,1),n++):n&&(e.splice(r,1),n--)}if(t)for(;n--;n)e.unshift("..");return e}function r(e,t){if(e.filter)return e.filter(t);for(var n=[],r=0;r=-1&&!o;i--){var a=i>=0?arguments[i]:e.cwd();if("string"!=typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(t=a+"/"+t,o="/"===a.charAt(0))}return t=n(r(t.split("/"),function(e){return!!e}),!o).join("/"),(o?"/":"")+t||"."},t.normalize=function(e){var o=t.isAbsolute(e),i="/"===a(e,-1);return e=n(r(e.split("/"),function(e){return!!e}),!o).join("/"),e||o||(e="."),e&&i&&(e+="/"),(o?"/":"")+e},t.isAbsolute=function(e){return"/"===e.charAt(0)},t.join=function(){var e=Array.prototype.slice.call(arguments,0);return t.normalize(r(e,function(e,t){if("string"!=typeof e)throw new TypeError("Arguments to path.join must be strings");return e}).join("/"))},t.relative=function(e,n){function r(e){for(var t=0;t=0&&""===e[n];n--);return t>n?[]:e.slice(t,n-t+1)}e=t.resolve(e).substr(1),n=t.resolve(n).substr(1);for(var 
o=r(e.split("/")),i=r(n.split("/")),a=Math.min(o.length,i.length),u=a,l=0;l=t||n<0||j&&r>=v}function c(){var e=k();if(s(e))return f(e);b=setTimeout(c,a(e))}function f(e){return b=void 0,P&&y?r(e):(y=m=void 0,g)}function d(){void 0!==b&&clearTimeout(b),E=0,y=w=m=b=void 0}function p(){return void 0===b?g:f(k())}function h(){var e=k(),n=s(e);if(y=arguments,m=this,w=e,n){if(void 0===b)return i(w);if(j)return b=setTimeout(c,t),r(w)}return void 0===b&&(b=setTimeout(c,t)),g}var y,m,v,g,b,w,E=0,S=!1,j=!1,P=!0;if("function"!=typeof e)throw new TypeError(l);return t=u(t)||0,o(n)&&(S=!!n.leading,j="maxWait"in n,v=j?_(u(n.maxWait)||0,t):v,P="trailing"in n?!!n.trailing:P),h.cancel=d,h.flush=p,h}function r(e,t,r){var i=!0,a=!0;if("function"!=typeof e)throw new TypeError(l);return o(r)&&(i="leading"in r?!!r.leading:i,a="trailing"in r?!!r.trailing:a),n(e,t,{leading:i,maxWait:t,trailing:a})}function o(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function i(e){return!!e&&"object"==typeof e}function a(e){return"symbol"==typeof e||i(e)&&w.call(e)==c}function u(e){if("number"==typeof e)return e;if(a(e))return s;if(o(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=o(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(f,"");var n=p.test(e);return n||h.test(e)?y(e.slice(2),n?2:8):d.test(e)?s:+e}var l="Expected a function",s=NaN,c="[object Symbol]",f=/^\s+|\s+$/g,d=/^[-+]0x[0-9a-f]+$/i,p=/^0b[01]+$/i,h=/^0o[0-7]+$/i,y=parseInt,m="object"==typeof t&&t&&t.Object===Object&&t,v="object"==typeof self&&self&&self.Object===Object&&self,g=m||v||Function("return this")(),b=Object.prototype,w=b.toString,_=Math.max,O=Math.min,k=function(){return g.Date.now()};e.exports=r}).call(t,n(56))},function(e,t){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(c===setTimeout)return setTimeout(e,0);if((c===n||!c)&&setTimeout)return c=setTimeout,setTimeout(e,0);try{return c(e,0)}catch(t){try{return c.call(null,e,0)}catch(t){return c.call(this,e,0)}}}function i(e){if(f===clearTimeout)return clearTimeout(e);if((f===r||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(e);try{return f(e)}catch(t){try{return f.call(null,e)}catch(t){return f.call(this,e)}}}function a(){y&&p&&(y=!1,p.length?h=p.concat(h):m=-1,h.length&&u())}function u(){if(!y){var e=o(a);y=!0;for(var t=h.length;t;){for(p=h,h=[];++m1)for(var n=1;n=Math.floor(a)&&f=Math.floor(u),y=n.getActiveLink();return h?(o===y&&n.setActiveLink(void 0),e.props.hashSpy&&d.getHash()===o&&d.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive()),s.updateStates()):p&&y!==o?(n.setActiveLink(o),e.props.hashSpy&&d.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o)),s.updateStates()):void 0}}};return f.propTypes=p,f.defaultProps={offset:0},f},Element:function(e){console.warn("Helpers.Element is deprecated since v1.7.0");var t=function(t){function n(e){r(this,n);var t=o(this,(n.__proto__||Object.getPrototypeOf(n)).call(this,e));return t.childBindings={domNode:null},t}return i(n,t),u(n,[{key:"componentDidMount",value:function(){if("undefined"==typeof window)return!1;this.registerElems(this.props.name)}},{key:"componentWillReceiveProps",value:function(e){this.props.name!==e.name&&this.registerElems(e.name)}},{key:"componentWillUnmount",value:function(){if("undefined"==typeof 
window)return!1;c.unregister(this.props.name)}},{key:"registerElems",value:function(e){c.register(e,this.childBindings.domNode)}},{key:"render",value:function(){return l.createElement(e,a({},this.props,{parentBindings:this.childBindings}))}}]),n}(l.Component);return t.propTypes={name:f.string,id:f.string},t}};e.exports=h},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(38),o=["mousedown","mousewheel","touchmove","keydown"];t.default={subscribe:function(e){return"undefined"!=typeof document&&o.forEach(function(t){return(0,r.addPassiveEventListener)(document,t,e)})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={defaultEasing:function(e){return e<.5?Math.pow(2*e,2)/2:1-Math.pow(2*(1-e),2)/2},linear:function(e){return e},easeInQuad:function(e){return e*e},easeOutQuad:function(e){return e*(2-e)},easeInOutQuad:function(e){return e<.5?2*e*e:(4-2*e)*e-1},easeInCubic:function(e){return e*e*e},easeOutCubic:function(e){return--e*e*e+1},easeInOutCubic:function(e){return e<.5?4*e*e*e:(e-1)*(2*e-2)*(2*e-2)+1},easeInQuart:function(e){return e*e*e*e},easeOutQuart:function(e){return 1- --e*e*e*e},easeInOutQuart:function(e){return e<.5?8*e*e*e*e:1-8*--e*e*e*e},easeInQuint:function(e){return e*e*e*e*e},easeOutQuint:function(e){return 1+--e*e*e*e*e},easeInOutQuint:function(e){return e<.5?16*e*e*e*e*e:1+16*--e*e*e*e*e}}},function(e,t){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof window&&(n=window)}e.exports=n},,,,function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"en-us":{sidemenu:[{title:"Developers List",children:[{title:"Developers",link:"/en-us/docs/developers/developers_dev.html"}]},{title:"Contribute Guide",children:[{title:"New contributor guide",link:"/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html"},{title:"Test coverage guide",link:"/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html"},{title:"How to report security issues",link:"/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html"},{title:"How to contribute",link:"/en-us/docs/developers/guide_dev.html"}]},{title:"Committer Guide",children:[{title:"Label an Issue",link:"/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html"},{title:"Website Guide",link:"/en-us/docs/developers/committer-guide/website-guide_dev.html"},{title:"Release Guide",link:"/en-us/docs/developers/committer-guide/release-guide_dev.html"}]}],barText:"Developers"},"zh-cn":{sidemenu:[{title:"开发者列表",children:[{title:"开发人员",link:"/zh-cn/docs/developers/developers_dev.html"}]},{title:"贡献者向导",children:[{title:"新贡献者向导",link:"/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html"},{title:"测试覆盖率向导",link:"/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html"},{title:"如何汇报安全漏洞",link:"/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html"},{title:"参与贡献",link:"/zh-cn/docs/developers/guide_dev.html"}]},{title:"提交者向导",children:[{title:"给问题打标签",link:"/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html"},{title:"网站向导",link:"/zh-cn/docs/developers/committer-guide/website-guide_dev.html"},{title:"版本发布向导",link:"/zh-cn/docs/developers/committer-guide/release-guide_dev.html"}]}],barText:"开发者"}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"en-us":{sidemenu:[{title:"Overview",children:[{title:"What is 
Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{title:"Terminology",link:"/en-us/docs/overview/terminology.html"},{title:"FAQ",link:"/en-us/docs/overview/faq.html"}]},{title:"User Doc",children:[{title:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{title:"API Guide",link:"/en-us/docs/user/api.html"},{title:"Microservices Framework Supports",link:"/en-us/docs/user/microservice.html"}]},{title:"Developer Guide",children:[{title:"Transaction Mode",children:[{title:"Seata AT mode",link:"/en-us/docs/dev/mode/at-mode.html"},{title:"Seata TCC mode",link:"/en-us/docs/dev/mode/tcc-mode.html"},{title:"Seata Saga mode",link:"/en-us/docs/dev/mode/saga-mode.html"}]},{title:"Metrics design",link:"/en-us/docs/dev/seata-mertics.html"}]},{title:"Ops Guide",children:[{title:"Configuration Isolation",link:"/en-us/docs/ops/multi-configuration-isolation.html"},{title:"Deploy",children:[{title:"Deploy Directly",link:"/en-us/docs/ops/deploy-server.html"},{title:"Deploy by Docker",link:"/en-us/docs/ops/deploy-by-docker.html"},{title:"Deploy by Kubernetes",link:"/en-us/docs/ops/deploy-by-kubernetes.html"},{title:"Deploy by Helm",link:"/en-us/docs/ops/deploy-by-helm.html"}]}]}],barText:"Documentation"},"zh-cn":{sidemenu:[{title:"概述",children:[{title:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{title:"术语表",link:"/zh-cn/docs/overview/terminology.html"},{title:"FAQ",link:"/zh-cn/docs/overview/faq.html"}]},{title:"用户文档",children:[{title:"快速启动",link:"/zh-cn/docs/user/quickstart.html"},{title:"Saga 模式",link:"/zh-cn/docs/user/saga.html"},{title:"参数配置",link:"/zh-cn/docs/user/configurations.html"},{title:"Spring 支持",link:"/zh-cn/docs/user/spring.html"},{title:"API 支持",link:"/zh-cn/docs/user/api.html"},{title:"微服务框架支持",link:"/zh-cn/docs/user/microservice.html"},{title:"ORM 框架支持",link:"/zh-cn/docs/user/ormframework.html"},{title:"数据源类型支持",link:"/zh-cn/docs/user/datasource.html"}]},{title:"开发者指南",children:[{title:"各事务模式",children:[{title:"Seata AT 模式",link:"/zh-cn/docs/dev/mode/at-mode.html"},{title:"Seata TCC 模式",link:"/zh-cn/docs/dev/mode/tcc-mode.html"},{title:"Seata Saga 模式",link:"/zh-cn/docs/dev/mode/saga-mode.html"}]},{title:"Metrics设计",link:"/zh-cn/docs/dev/seata-mertics.html"}]},{title:"运维指南",children:[{title:"Metrics配置",link:"/zh-cn/docs/ops/operation.html"},{title:"部署",children:[{title:"新人文档",link:"/zh-cn/docs/ops/deploy-guide-beginner.html"},{title:"直接部署",link:"/zh-cn/docs/ops/deploy-server.html"},{title:"Docker部署",link:"/zh-cn/docs/ops/deploy-by-docker.html"},{title:"Kubernetes部署",link:"/zh-cn/docs/ops/deploy-by-kubernetes.html"},{title:"Helm 部署",link:"/zh-cn/docs/ops/deploy-by-helm.html"}]}]}],barText:"文档"}}},,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=function(){function e(e,t){for(var n=0;n1){if(i=e({path:"/"},r.defaults,i),"number"==typeof i.expires){var l=new 
Date;l.setMilliseconds(l.getMilliseconds()+864e5*i.expires),i.expires=l}i.expires=i.expires?i.expires.toUTCString():"";try{a=JSON.stringify(o),/^[\{\[]/.test(a)&&(o=a)}catch(e){}o=n.write?n.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var u="";for(var s in i)i[s]&&(u+="; "+s,!0!==i[s]&&(u+="="+i[s]));return document.cookie=t+"="+o+u}t||(a={});for(var c=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d-1?t:e}function p(e,t){t=t||{};var n=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,n||null==e._bodyInit||(n=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function h(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var n=e.split("="),r=n.shift().replace(/\+/g," "),o=n.join("=").replace(/\+/g," ");t.append(decodeURIComponent(r),decodeURIComponent(o))}}),t}function m(e){var t=new o;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var n=e.split(":"),r=n.shift().trim();if(r){var o=n.join(":").trim();t.append(r,o)}}),t}function y(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var g={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(g.arrayBuffer)var v=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],b=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&v.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,r){e=t(e),r=n(r);var o=this.map[e];this.map[e]=o?o+","+r:r},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,r){this.map[t(e)]=n(r)},o.prototype.forEach=function(e,t){for(var n in this.map)this.map.hasOwnProperty(n)&&e.call(t,this.map[n],n,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,n){e.push(n)}),r(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),r(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,n){e.push([n,t])}),r(e)},g.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var _=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new 
p(this,{body:this._bodyInit})},f.call(p.prototype),f.call(y.prototype),y.prototype.clone=function(){return new y(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},y.error=function(){var e=new y(null,{status:0,statusText:""});return e.type="error",e};var k=[301,302,303,307,308];y.redirect=function(e,t){if(-1===k.indexOf(t))throw new RangeError("Invalid status code");return new y(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=y,e.fetch=function(e,t){return new Promise(function(n,r){var o=new p(e,t),i=new XMLHttpRequest;i.onload=function(){var e={status:i.status,statusText:i.statusText,headers:m(i.getAllResponseHeaders()||"")};e.url="responseURL"in i?i.responseURL:e.headers.get("X-Request-URL");var t="response"in i?i.response:i.responseText;n(new y(t,e))},i.onerror=function(){r(new TypeError("Network request failed"))},i.ontimeout=function(){r(new TypeError("Network request failed"))},i.open(o.method,o.url,!0),"include"===o.credentials?i.withCredentials=!0:"omit"===o.credentials&&(i.withCredentials=!1),"responseType"in i&&g.blob&&(i.responseType="blob"),o.headers.forEach(function(e,t){i.setRequestHeader(t,e)}),i.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.addPassiveEventListener=function(e,t,n){var r=function(){var e=!1;try{var t=Object.defineProperty({},"passive",{get:function(){e=!0}});window.addEventListener("test",null,t)}catch(e){}return e}();e.addEventListener(t,n,!!r&&{passive:!0})},t.removePassiveEventListener=function(e,t,n){e.removeEventListener(t,n)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r={registered:{},scrollEvent:{register:function(e,t){r.registered[e]=t},remove:function(e){r.registered[e]=null}}};t.default=r},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var l=Object.assign||function(e){for(var t=1;t=Math.floor(a)&&c=Math.floor(l),p=n.getActiveLink();d&&(o===p&&n.setActiveLink(void 0),e.props.hashSpy&&w.default.getHash()===o&&w.default.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive(o,i))),!f||p===o&&!1!==e.state.active||(n.setActiveLink(o),e.props.hashSpy&&w.default.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o,i)))}}};return r.propTypes=_,r.defaultProps={offset:0},r}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(28),o=function(e){return e&&e.__esModule?e:{default:e}}(r),i=n(18),a=function(e){return(0,o.default)(e,66)},l={spyCallbacks:[],spySetState:[],scrollSpyContainers:[],mount:function(e){if(e){var 
t=a(function(t){l.scrollHandler(e)});l.scrollSpyContainers.push(e),(0,i.addPassiveEventListener)(e,"scroll",t)}},isMounted:function(e){return-1!==l.scrollSpyContainers.indexOf(e)},currentPositionY:function(e){if(e===document){var t=void 0!==window.pageXOffset,n="CSS1Compat"===(document.compatMode||"");return t?window.pageYOffset:n?document.documentElement.scrollTop:document.body.scrollTop}return e.scrollTop},scrollHandler:function(e){(l.scrollSpyContainers[l.scrollSpyContainers.indexOf(e)].spyCallbacks||[]).forEach(function(t){return t(l.currentPositionY(e))})},addStateHandler:function(e){l.spySetState.push(e)},addSpyHandler:function(e,t){var n=l.scrollSpyContainers[l.scrollSpyContainers.indexOf(t)];n.spyCallbacks||(n.spyCallbacks=[]),n.spyCallbacks.push(e),e(l.currentPositionY(t))},updateStates:function(){l.spySetState.forEach(function(e){return e()})},unmount:function(e,t){l.scrollSpyContainers.forEach(function(e){return e.spyCallbacks&&e.spyCallbacks.length&&e.spyCallbacks.splice(e.spyCallbacks.indexOf(t),1)}),l.spySetState&&l.spySetState.length&&l.spySetState.splice(l.spySetState.indexOf(e),1),document.removeEventListener("scroll",l.scrollHandler)},update:function(){return l.scrollSpyContainers.forEach(function(e){return l.scrollHandler(e)})}};t.default=l},function(e,t){},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var o=Object.assign||function(e){for(var t=1;t=o.duration?1:t(o.progress/o.duration),o.currentPositionY=o.startPositionY+Math.ceil(o.deltaTop*o.percent),o.containerElement&&o.containerElement!==document&&o.containerElement!==document.body?o.containerElement.scrollTop=o.currentPositionY:window.scrollTo(0,o.currentPositionY),o.percent<1){var i=e.bind(null,t,n);return void m.call(window,i)}f.default.registered.end&&f.default.registered.end(o.to,o.target,o.currentPositionY)},w=function(e){e.data.containerElement=e?e.containerId?document.getElementById(e.containerId):e.container&&e.container.nodeType?e.container:document:null},_=function(e,t,n,r){if(t.data=t.data||y(),window.clearTimeout(t.data.delayTimeout),s.default.subscribe(function(){t.data.cancel=!0}),w(t),t.data.start=null,t.data.cancel=!1,t.data.startPositionY=g(t),t.data.targetPositionY=t.absolute?e:e+t.data.startPositionY,t.data.startPositionY===t.data.targetPositionY)return void(f.default.registered.end&&f.default.registered.end(t.data.to,t.data.target,t.data.currentPositionY));t.data.deltaTop=Math.round(t.data.targetPositionY-t.data.startPositionY),t.data.duration=p(t.duration)(t.data.deltaTop),t.data.duration=isNaN(parseFloat(t.data.duration))?1e3:parseFloat(t.data.duration),t.data.to=n,t.data.target=r;var o=d(t),i=b.bind(null,o,t);if(t&&t.delay>0)return void(t.data.delayTimeout=window.setTimeout(function(){m.call(window,i)},t.delay));m.call(window,i)},k=function(e){return e=o({},e),e.data=e.data||y(),e.absolute=!0,e},O=function(e){_(0,k(e))},E=function(e,t){_(e,k(t))},S=function(e){e=k(e),w(e),_(v(e),e)},T=function(e,t){t=k(t),w(t),_(g(t)+e,t)};t.default={animateTopScroll:_,getAnimationType:d,scrollToTop:O,scrollToBottom:S,scrollTo:E,scrollMore:T}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof 
t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var l=Object.assign||function(e){for(var t=1;t=0;r--){var o=e[r];"."===o?e.splice(r,1):".."===o?(e.splice(r,1),n++):n&&(e.splice(r,1),n--)}if(t)for(;n--;n)e.unshift("..");return e}function r(e,t){if(e.filter)return e.filter(t);for(var n=[],r=0;r=-1&&!o;i--){var a=i>=0?arguments[i]:e.cwd();if("string"!=typeof a)throw new TypeError("Arguments to path.resolve must be strings");a&&(t=a+"/"+t,o="/"===a.charAt(0))}return t=n(r(t.split("/"),function(e){return!!e}),!o).join("/"),(o?"/":"")+t||"."},t.normalize=function(e){var o=t.isAbsolute(e),i="/"===a(e,-1);return e=n(r(e.split("/"),function(e){return!!e}),!o).join("/"),e||o||(e="."),e&&i&&(e+="/"),(o?"/":"")+e},t.isAbsolute=function(e){return"/"===e.charAt(0)},t.join=function(){var e=Array.prototype.slice.call(arguments,0);return t.normalize(r(e,function(e,t){if("string"!=typeof e)throw new TypeError("Arguments to path.join must be strings");return e}).join("/"))},t.relative=function(e,n){function r(e){for(var t=0;t=0&&""===e[n];n--);return t>n?[]:e.slice(t,n-t+1)}e=t.resolve(e).substr(1),n=t.resolve(n).substr(1);for(var o=r(e.split("/")),i=r(n.split("/")),a=Math.min(o.length,i.length),l=a,u=0;u=t||n<0||T&&r>=g}function c(){var e=O();if(s(e))return f(e);b=setTimeout(c,a(e))}function f(e){return b=void 0,P&&m?r(e):(m=y=void 0,v)}function d(){void 0!==b&&clearTimeout(b),E=0,m=w=y=b=void 0}function p(){return void 0===b?v:f(O())}function h(){var e=O(),n=s(e);if(m=arguments,y=this,w=e,n){if(void 0===b)return i(w);if(T)return b=setTimeout(c,t),r(w)}return void 0===b&&(b=setTimeout(c,t)),v}var m,y,g,v,b,w,E=0,S=!1,T=!1,P=!0;if("function"!=typeof e)throw new TypeError(u);return t=l(t)||0,o(n)&&(S=!!n.leading,T="maxWait"in n,g=T?_(l(n.maxWait)||0,t):g,P="trailing"in n?!!n.trailing:P),h.cancel=d,h.flush=p,h}function r(e,t,r){var i=!0,a=!0;if("function"!=typeof e)throw new TypeError(u);return o(r)&&(i="leading"in r?!!r.leading:i,a="trailing"in r?!!r.trailing:a),n(e,t,{leading:i,maxWait:t,trailing:a})}function o(e){var t=typeof e;return!!e&&("object"==t||"function"==t)}function i(e){return!!e&&"object"==typeof e}function a(e){return"symbol"==typeof e||i(e)&&w.call(e)==c}function l(e){if("number"==typeof e)return e;if(a(e))return s;if(o(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=o(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(f,"");var n=p.test(e);return n||h.test(e)?m(e.slice(2),n?2:8):d.test(e)?s:+e}var u="Expected a function",s=NaN,c="[object Symbol]",f=/^\s+|\s+$/g,d=/^[-+]0x[0-9a-f]+$/i,p=/^0b[01]+$/i,h=/^0o[0-7]+$/i,m=parseInt,y="object"==typeof t&&t&&t.Object===Object&&t,g="object"==typeof self&&self&&self.Object===Object&&self,v=y||g||Function("return this")(),b=Object.prototype,w=b.toString,_=Math.max,k=Math.min,O=function(){return v.Date.now()};e.exports=r}).call(t,n(36))},function(e,t){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(c===setTimeout)return setTimeout(e,0);if((c===n||!c)&&setTimeout)return c=setTimeout,setTimeout(e,0);try{return c(e,0)}catch(t){try{return c.call(null,e,0)}catch(t){return c.call(this,e,0)}}}function i(e){if(f===clearTimeout)return 
clearTimeout(e);if((f===r||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(e);try{return f(e)}catch(t){try{return f.call(null,e)}catch(t){return f.call(this,e)}}}function a(){m&&p&&(m=!1,p.length?h=p.concat(h):y=-1,h.length&&l())}function l(){if(!m){var e=o(a);m=!0;for(var t=h.length;t;){for(p=h,h=[];++y1)for(var n=1;n=Math.floor(a)&&f=Math.floor(l),m=n.getActiveLink();return h?(o===m&&n.setActiveLink(void 0),e.props.hashSpy&&d.getHash()===o&&d.changeHash(),e.props.spy&&e.state.active&&(e.setState({active:!1}),e.props.onSetInactive&&e.props.onSetInactive()),s.updateStates()):p&&m!==o?(n.setActiveLink(o),e.props.hashSpy&&d.changeHash(o),e.props.spy&&(e.setState({active:!0}),e.props.onSetActive&&e.props.onSetActive(o)),s.updateStates()):void 0}}};return f.propTypes=p,f.defaultProps={offset:0},f},Element:function(e){console.warn("Helpers.Element is deprecated since v1.7.0");var t=function(t){function n(e){r(this,n);var t=o(this,(n.__proto__||Object.getPrototypeOf(n)).call(this,e));return t.childBindings={domNode:null},t}return i(n,t),l(n,[{key:"componentDidMount",value:function(){if("undefined"==typeof window)return!1;this.registerElems(this.props.name)}},{key:"componentWillReceiveProps",value:function(e){this.props.name!==e.name&&this.registerElems(e.name)}},{key:"componentWillUnmount",value:function(){if("undefined"==typeof window)return!1;c.unregister(this.props.name)}},{key:"registerElems",value:function(e){c.register(e,this.childBindings.domNode)}},{key:"render",value:function(){return u.createElement(e,a({},this.props,{parentBindings:this.childBindings}))}}]),n}(u.Component);return t.propTypes={name:f.string,id:f.string},t}};e.exports=h},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(18),o=["mousedown","mousewheel","touchmove","keydown"];t.default={subscribe:function(e){return"undefined"!=typeof document&&o.forEach(function(t){return(0,r.addPassiveEventListener)(document,t,e)})}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={defaultEasing:function(e){return e<.5?Math.pow(2*e,2)/2:1-Math.pow(2*(1-e),2)/2},linear:function(e){return e},easeInQuad:function(e){return e*e},easeOutQuad:function(e){return e*(2-e)},easeInOutQuad:function(e){return e<.5?2*e*e:(4-2*e)*e-1},easeInCubic:function(e){return e*e*e},easeOutCubic:function(e){return--e*e*e+1},easeInOutCubic:function(e){return e<.5?4*e*e*e:(e-1)*(2*e-2)*(2*e-2)+1},easeInQuart:function(e){return e*e*e*e},easeOutQuart:function(e){return 1- --e*e*e*e},easeInOutQuart:function(e){return e<.5?8*e*e*e*e:1-8*--e*e*e*e},easeInQuint:function(e){return e*e*e*e*e},easeOutQuint:function(e){return 1+--e*e*e*e*e},easeInOutQuint:function(e){return e<.5?16*e*e*e*e*e:1+16*--e*e*e*e*e}}},function(e,t){var n;n=function(){return this}();try{n=n||Function("return this")()||(0,eval)("this")}catch(e){"object"==typeof window&&(n=window)}e.exports=n},,,,function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"en-us":{sidemenu:[{title:"Developers List",children:[{title:"Developers",link:"/en-us/docs/developers/developers_dev.html"}]},{title:"Contribute Guide",children:[{title:"New contributor guide",link:"/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html"},{title:"Test coverage guide",link:"/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html"},{title:"How to report security issues",link:"/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html"},{title:"How to 
contribute",link:"/en-us/docs/developers/guide_dev.html"}]},{title:"Committer Guide",children:[{title:"Label an Issue",link:"/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html"},{title:"Website Guide",link:"/en-us/docs/developers/committer-guide/website-guide_dev.html"},{title:"Release Guide",link:"/en-us/docs/developers/committer-guide/release-guide_dev.html"}]}],barText:"Developers"},"zh-cn":{sidemenu:[{title:"开发者列表",children:[{title:"开发人员",link:"/zh-cn/docs/developers/developers_dev.html"}]},{title:"贡献者向导",children:[{title:"新贡献者向导",link:"/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html"},{title:"测试覆盖率向导",link:"/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html"},{title:"如何汇报安全漏洞",link:"/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html"},{title:"参与贡献",link:"/zh-cn/docs/developers/guide_dev.html"}]},{title:"提交者向导",children:[{title:"给问题打标签",link:"/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html"},{title:"网站向导",link:"/zh-cn/docs/developers/committer-guide/website-guide_dev.html"},{title:"版本发布向导",link:"/zh-cn/docs/developers/committer-guide/release-guide_dev.html"}]}],barText:"开发者"}}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"en-us":{sidemenu:[{title:"Overview",children:[{title:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{title:"Terminology",link:"/en-us/docs/overview/terminology.html"},{title:"FAQ",link:"/en-us/docs/overview/faq.html"}]},{title:"User Doc",children:[{title:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{title:"API Guide",link:"/en-us/docs/user/api.html"},{title:"Microservices Framework Supports",link:"/en-us/docs/user/microservice.html"}]},{title:"Developer Guide",children:[{title:"Transaction Mode",children:[{title:"Seata AT mode",link:"/en-us/docs/dev/mode/at-mode.html"},{title:"Seata TCC mode",link:"/en-us/docs/dev/mode/tcc-mode.html"},{title:"Seata Saga mode",link:"/en-us/docs/dev/mode/saga-mode.html"}]},{title:"Metrics design",link:"/en-us/docs/dev/seata-mertics.html"}]},{title:"Ops Guide",children:[{title:"Configuration Isolation",link:"/en-us/docs/ops/multi-configuration-isolation.html"},{title:"Deploy",children:[{title:"Deploy Directly",link:"/en-us/docs/ops/deploy-server.html"},{title:"Deploy by Docker",link:"/en-us/docs/ops/deploy-by-docker.html"},{title:"Deploy by Kubernetes",link:"/en-us/docs/ops/deploy-by-kubernetes.html"},{title:"Deploy by Helm",link:"/en-us/docs/ops/deploy-by-helm.html"}]}]}],barText:"Documentation"},"zh-cn":{sidemenu:[{title:"概述",children:[{title:"Seata 是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{title:"术语表",link:"/zh-cn/docs/overview/terminology.html"},{title:"FAQ",link:"/zh-cn/docs/overview/faq.html"}]},{title:"用户文档",children:[{title:"快速启动",link:"/zh-cn/docs/user/quickstart.html"},{title:"Saga 模式",link:"/zh-cn/docs/user/saga.html"},{title:"参数配置",link:"/zh-cn/docs/user/configurations.html"},{title:"Spring 支持",link:"/zh-cn/docs/user/spring.html"},{title:"API 支持",link:"/zh-cn/docs/user/api.html"},{title:"微服务框架支持",link:"/zh-cn/docs/user/microservice.html"},{title:"ORM 框架支持",link:"/zh-cn/docs/user/ormframework.html"},{title:"数据源类型支持",link:"/zh-cn/docs/user/datasource.html"}]},{title:"开发者指南",children:[{title:"各事务模式",children:[{title:"Seata AT 模式",link:"/zh-cn/docs/dev/mode/at-mode.html"},{title:"Seata TCC 模式",link:"/zh-cn/docs/dev/mode/tcc-mode.html"},{title:"Seata Saga 
模式",link:"/zh-cn/docs/dev/mode/saga-mode.html"}]},{title:"Metrics设计",link:"/zh-cn/docs/dev/seata-mertics.html"}]},{title:"运维指南",children:[{title:"Metrics配置",link:"/zh-cn/docs/ops/operation.html"},{title:"部署",children:[{title:"新人文档",link:"/zh-cn/docs/ops/deploy-guide-beginner.html"},{title:"直接部署",link:"/zh-cn/docs/ops/deploy-server.html"},{title:"Docker部署",link:"/zh-cn/docs/ops/deploy-by-docker.html"},{title:"Kubernetes部署",link:"/zh-cn/docs/ops/deploy-by-kubernetes.html"},{title:"Helm 部署",link:"/zh-cn/docs/ops/deploy-by-helm.html"}]}]}],barText:"文档"}}},,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var l=function(){function e(e,t){for(var n=0;n1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,r){e.exports=r(33)()},function(e,t){e.exports=ReactDOM},function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 
是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,r){var n,o;/*! +!function(e){function t(r){if(n[r])return n[r].exports;var a=n[r]={i:r,l:!1,exports:{}};return e[r].call(a.exports,a,a.exports,t),a.l=!0,a.exports}var n={};t.m=e,t.c=n,t.i=function(e){return e},t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/build/",t(t.s=63)}([function(e,t){e.exports=React},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});t.throttle=function(e,t){var n=null;return function(){for(var r=arguments.length,a=Array(r),o=0;o1&&/^\/[^\/]/.test(""+e)?""+window.rootPath+e:e},t.parseJSONStr=function(e){try{return JSON.parse(e)}catch(t){return e}}},function(e,t,n){e.exports=n(13)()},function(e,t){e.exports=ReactDOM},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={rootPath:"",port:8080,domain:"seata.io",defaultSearch:"baidu",defaultLanguage:"en-us","en-us":{pageMenu:[{key:"home",text:"HOME",link:"/en-us/index.html"},{key:"docs",text:"DOCS",link:"/en-us/docs/overview/what-is-seata.html"},{key:"developers",text:"DEVELOPERS",link:"/en-us/docs/developers/developers_dev.html"},{key:"blog",text:"BLOG",link:"/en-us/blog/index.html"},{key:"community",text:"COMMUNITY",link:"/en-us/community/index.html"},{key:"download",text:"DOWNLOAD",link:"/en-us/blog/download.html"}],vision:{title:"Vision",content:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture."},documentation:{title:"Documentation",list:[{text:"What is Seata?",link:"/en-us/docs/overview/what-is-seata.html"},{text:"Quick Start",link:"/en-us/docs/user/quickstart.html"},{text:"Report a doc issue",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"Edit This Page on GitHub",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"Resources",list:[{text:"Blog",link:"/en-us/blog/index.html"},{text:"Community",link:"/en-us/community/index.html"}]},copyright:"Copyright © 2019 Seata"},"zh-cn":{pageMenu:[{key:"home",text:"首页",link:"/zh-cn/index.html"},{key:"docs",text:"文档",link:"/zh-cn/docs/overview/what-is-seata.html"},{key:"developers",text:"开发者",link:"/zh-cn/docs/developers/developers_dev.html"},{key:"blog",text:"博客",link:"/zh-cn/blog/index.html"},{key:"community",text:"社区",link:"/zh-cn/community/index.html"},{key:"download",text:"下载",link:"/zh-cn/blog/download.html"}],vision:{title:"愿景",content:"Seata 是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。"},documentation:{title:"文档",list:[{text:"Seata 
是什么?",link:"/zh-cn/docs/overview/what-is-seata.html"},{text:"快速开始",link:"/zh-cn/docs/user/quickstart.html"},{text:"报告文档问题",link:"https://github.com/seata/seata.github.io/issues/new"},{text:"在Github上编辑此文档",link:"https://github.com/seata/seata.github.io"}]},resources:{title:"资源",list:[{text:"博客",link:"/zh-cn/blog/index.html"},{text:"社区",link:"/zh-cn/community/index.html"}]},copyright:"Copyright © 2019 Seata"}}},function(e,t,n){var r,a;/*! Copyright (c) 2017 Jed Watson. Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ -!function(){"use strict";function r(){for(var e=[],t=0;t1&&void 0!==arguments[1]?arguments[1]:t.key)+arguments[2]})}},{key:"key",get:function(){return this.childDescriptor.key}},{key:"parentNotation",get:function(){return this.parentKlass.constructor.name+"#"+this.parentPropertySignature}},{key:"childNotation",get:function(){return this.childKlass.constructor.name+"#"+this.childPropertySignature}},{key:"parentTopic",get:function(){return this._getTopic(this.parentDescriptor)}},{key:"childTopic",get:function(){return this._getTopic(this.childDescriptor)}},{key:"parentPropertySignature",get:function(){return this._extractTopicSignature(this.parentTopic)}},{key:"childPropertySignature",get:function(){return this._extractTopicSignature(this.childTopic)}}]),h(e,[{key:"assert",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";!0!==e&&this.error("{child} does not properly override {parent}"+t)}},{key:"error",value:function(e){var t=this;throw e=e.replace("{parent}",function(e){return t.parentNotation}).replace("{child}",function(e){return t.childNotation}),new SyntaxError(e)}}]),e}(),g=[function(e){return e.toLowerCase()},function(e){return e.toUpperCase()},function(e){return e+"s"},function(e){return e.slice(0,-1)},function(e){return e.slice(1,e.length)}]},function(e,t,r){"use strict";function n(e,t,n,c){var s=u(c,3),f=s[0],d=void 0===f?null:f,p=s[1],h=void 0!==p&&p,y=s[2],m=void 0===y?l:y;if(!o.__enabled)return o.__warned||(m.warn("console.profile is not supported. 
All @profile decorators are disabled."),o.__warned=!0),n;var g=n.value;if(null===d&&(d=e.constructor.name+"."+t),"function"!=typeof g)throw new SyntaxError("@profile can only be used on functions, not: "+g);return i({},n,{value:function(){var e=Date.now(),t=r.i(a.c)(this);(!0===h&&!t.profileLastRan||!1===h||"number"==typeof h&&e-t.profileLastRan>h||"function"==typeof h&&h.apply(this,arguments))&&(m.profile(d),t.profileLastRan=e);try{return g.apply(this,arguments)}finally{m.profileEnd(d)}}})}function o(){for(var e=arguments.length,t=Array(e),o=0;o1){if(a=e({path:"/"},n.defaults,a),"number"==typeof a.expires){var u=new Date;u.setMilliseconds(u.getMilliseconds()+864e5*a.expires),a.expires=u}a.expires=a.expires?a.expires.toUTCString():"";try{i=JSON.stringify(o),/^[\{\[]/.test(i)&&(o=i)}catch(e){}o=r.write?r.write(o,t):encodeURIComponent(String(o)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var l="";for(var c in a)a[c]&&(l+="; "+c,!0!==a[c]&&(l+="="+a[c]));return document.cookie=t+"="+o+l}t||(i={});for(var s=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d-1?t:e}function p(e,t){t=t||{};var r=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,r||null==e._bodyInit||(r=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&r)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(r)}function h(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var r=e.split("="),n=r.shift().replace(/\+/g," "),o=r.join("=").replace(/\+/g," ");t.append(decodeURIComponent(n),decodeURIComponent(o))}}),t}function y(e){var t=new o;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var r=e.split(":"),n=r.shift().trim();if(n){var o=r.join(":").trim();t.append(n,o)}}),t}function m(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var g={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(g.arrayBuffer)var b=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],v=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&b.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,n){e=t(e),n=r(n);var o=this.map[e];this.map[e]=o?o+","+n:n},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return 
this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,n){this.map[t(e)]=r(n)},o.prototype.forEach=function(e,t){for(var r in this.map)this.map.hasOwnProperty(r)&&e.call(t,this.map[r],r,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,r){e.push(r)}),n(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),n(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,r){e.push([r,t])}),n(e)},g.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var E=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},f.call(p.prototype),f.call(m.prototype),m.prototype.clone=function(){return new m(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},m.error=function(){var e=new m(null,{status:0,statusText:""});return e.type="error",e};var O=[301,302,303,307,308];m.redirect=function(e,t){if(-1===O.indexOf(t))throw new RangeError("Invalid status code");return new m(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=m,e.fetch=function(e,t){return new Promise(function(r,n){var o=new p(e,t),a=new XMLHttpRequest;a.onload=function(){var e={status:a.status,statusText:a.statusText,headers:y(a.getAllResponseHeaders()||"")};e.url="responseURL"in a?a.responseURL:e.headers.get("X-Request-URL");var t="response"in a?a.response:a.responseText;r(new m(t,e))},a.onerror=function(){n(new TypeError("Network request failed"))},a.ontimeout=function(){n(new TypeError("Network request failed"))},a.open(o.method,o.url,!0),"include"===o.credentials?a.withCredentials=!0:"omit"===o.credentials&&(a.withCredentials=!1),"responseType"in a&&g.blob&&(a.responseType="blob"),o.headers.forEach(function(e,t){a.setRequestHeader(t,e)}),a.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},,,,,,,,,,,,,,,,,,,,,,,,,function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"zh-cn":{brand:{brandName:"Seata",briefIntroduction:"Seata 是一款开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。",buttons:[{text:"快速入门",link:"/zh-cn/docs/overview/what-is-seata.html",type:"primary"},{text:"Github",link:"https://github.com/seata/seata",type:"normal"}]},introduction:{title:"Seata 是什么?",desc:"Seata 是一款开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。在 Seata 开源之前,Seata 对应的内部版本在阿里经济体内部一直扮演着分布式一致性中间件的角色,帮助经济体平稳的度过历年的双11,对各BU业务进行了有力的支撑。经过多年沉淀与积累,商业化产品先后在阿里云、金融云进行售卖。2019.1 为了打造更加完善的技术生态和普惠技术成果,Seata 正式宣布对外开源,未来 Seata 将以社区共建的形式帮助其技术更加可靠与完备。",img:"https://img.alicdn.com/tfs/TB1rDpkJAvoK1RjSZPfXXXPKFXa-794-478.png"},features:{title:"特色功能",list:[{icon:"feature-1",title:"微服务框架支持",content:"目前已支持 Dubbo、Spring Cloud、Sofa-RPC、Motan 和 grpc 等RPC框架,其他框架持续集成中"},{icon:"feature-2",title:"AT 模式",content:"提供无侵入自动补偿的事务模式,目前已支持 MySQL、 Oracle 的AT模式、PostgreSQL、H2 开发中"},{icon:"feature-3",title:"TCC 模式",content:"支持 TCC 模式并可与 AT 混用,灵活度更高"},{icon:"feature-4",title:"SAGA 模式",content:"为长事务提供有效的解决方案"},{icon:"feature-5",title:"XA 模式(开发中)",content:"支持已实现 XA 接口的数据库的 XA 模式"},{icon:"feature-6",title:"高可用",content:"支持基于数据库存储的集群模式,水平扩展能力强"}]}},"en-us":{brand:{brandName:"Seata",briefIntroduction:"Seata is an open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture.",buttons:[{text:"Get 
Started",link:"/en-us/docs/user/quickstart.html",type:"primary"},{text:"Github",link:"https://github.com/seata/seata",type:"normal"}]},introduction:{title:"What is Seata?",desc:"Seata is an open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture. Before the open-source of Seata, the internal version of Seata played a role of distributed consistency Middleware in Ali economy, helping the economy to survive the double 11 of the past years smoothly, and providing strong support for businesses of all departments. After years of precipitation and accumulation, commercial products have been sold in Alibaba cloud and financial cloud. 2019.1 in order to create a more complete technological ecology and inclusive technological achievements, Seata officially announced open source to the outside world. In the future, Seata will help its technology become more reliable and complete in the form of community building.",img:"https://img.alicdn.com/tfs/TB1rDpkJAvoK1RjSZPfXXXPKFXa-794-478.png"},features:{title:"Feature List",list:[{icon:"feature-1",title:"Microservices Framework Support",content:"RPC frameworks such as Dubbo, Spring Cloud, Sofa-RPC, Motan, and grpc are currently supported, and other frameworks are continuously integrated."},{icon:"feature-2",title:"AT mode",content:"Provides non-intrusive automatic compensation transaction mode, currently supports MySQL, Oracle's AT mode, PostgreSQL, In developing the H2."},{icon:"feature-3",title:"TCC mode",content:"Support TCC mode and mix with AT for greater flexibility."},{icon:"feature-4",title:"SAGA mode",content:"Provide an effective solution for long transactions."},{icon:"feature-5",title:"XA mode (under development)",content:"Support for XA schemas for databases that have implemented XA interfaces."},{icon:"feature-6",title:"High availability",content:"Support cluster mode based on database storage, strong horizontal scalability."}]}}}},function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}function o(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}Object.defineProperty(t,"__esModule",{value:!0});var a=r(1),i=n(a),u=r(3),l=n(u),c=r(6),s=n(c),f=r(2);r(84);var d={type:l.default.oneOf(["primary","normal"]),link:l.default.string,target:l.default.string},p={type:"primary",link:"",target:"_self"},h=function(e){return i.default.createElement("a",{className:(0,s.default)(o({button:!0},"button-"+e.type,!0)),target:e.target||"_self",href:(0,f.getLink)(e.link)},e.children)};h.propTypes=d,h.defaultProps=p,t.default=h},,,,,,,,function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var o=r(1),a=n(o),i=r(77),u=n(i),l=function(e){var t=e.feature;return a.default.createElement("li",null,a.default.createElement(u.default,{type:t.icon}),a.default.createElement("div",null,a.default.createElement("h4",null,t.title),a.default.createElement("p",null,t.content)))};t.default=l},,,,,function(e,t){},function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}function o(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}Object.defineProperty(t,"__esModule",{value:!0});var a=r(1),i=n(a),u=r(3),l=n(u),c=r(6),s=n(c);r(85);var f={type:l.default.string.isRequired},d=function(e){var t=e.type;return 
i.default.createElement("i",{className:(0,s.default)(o({"docsite-icon":!0},"docsite-icon-"+t,!0))})};d.propTypes=f,t.default=d},,,,,,function(e,t,r){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var u=function(){function e(e,t){for(var r=0;r66?e.setState({headerType:"normal"}):e.setState({headerType:"primary"})}),fetch("//api.github.com/repos/seata/seata").then(function(e){return e.json()}).then(function(t){e.setState({starCount:t.stargazers_count,forkCount:t.forks_count})})}},{key:"render",value:function(){var e=this.state,t=e.starCount,r=e.forkCount,n=this.getLanguage(),o=S.default[n],a=this.state.headerType,i="primary"===a?"/img/seata_logo_white.png":"/img/seata_logo.png";return c.default.createElement("div",{className:"home-page"},c.default.createElement("section",{className:"top-section"},c.default.createElement(h.default,{currentKey:"home",type:a,logo:i,language:n,onLanguageChange:this.onLanguageChange}),c.default.createElement("div",{className:"top-body"},c.default.createElement("div",{className:"vertical-middle"},c.default.createElement("div",{className:"product-name"},c.default.createElement("h2",null,o.brand.brandName)),c.default.createElement("p",{className:"product-desc"},o.brand.briefIntroduction),c.default.createElement("div",{className:"button-area"},o.brand.buttons.map(function(e){return c.default.createElement(m.default,{type:e.type,key:e.type,link:e.link,target:e.target},e.text)})),c.default.createElement("div",{className:"github-buttons"},c.default.createElement("a",{href:"https://github.com/seata/seata",target:"_blank",rel:"noopener noreferrer"},c.default.createElement("div",{className:"star"},c.default.createElement("img",{src:"https://img.alicdn.com/tfs/TB1FlB1JwHqK1RjSZFPXXcwapXa-32-32.png"}),c.default.createElement("span",{className:"type"},"Star"),c.default.createElement("span",{className:"line"}),c.default.createElement("span",{className:"count"},t))),c.default.createElement("a",{href:"https://github.com/seata/seata/fork",target:"_blank",rel:"noopener noreferrer"},c.default.createElement("div",{className:"fork"},c.default.createElement("img",{src:"https://img.alicdn.com/tfs/TB1zbxSJwDqK1RjSZSyXXaxEVXa-32-32.png"}),c.default.createElement("span",{className:"type"},"Fork"),c.default.createElement("span",{className:"line"}),c.default.createElement("span",{className:"count"},r))))),c.default.createElement("div",{className:"animation"},c.default.createElement("img",{className:"img1",src:"//img.alicdn.com/tfs/TB1evnpJhnaK1RjSZFBXXcW7VXa-702-312.png"}),c.default.createElement("img",{className:"img2",src:"//img.alicdn.com/tfs/TB1iau9JcbpK1RjSZFyXXX_qFXa-914-1156.png"}),c.default.createElement("div",{className:"outer-circle"}),c.default.createElement("div",{className:"rotate-circle"},c.default.createElement("svg",{viewBox:"0 0 404 
404",xmlns:"http://www.w3.org/2000/svg"},c.default.createElement("defs",null,c.default.createElement("linearGradient",{id:"linear",x1:"0%",y1:"0%",x2:"100%",y2:"0%"},c.default.createElement("stop",{offset:"0%",stopColor:"rgba(17, 186, 250, 1)"}),c.default.createElement("stop",{offset:"50%",stopColor:"rgba(17, 186, 250, 0.1)"}),c.default.createElement("stop",{offset:"50%",stopColor:"rgba(17, 186, 250, 1)"}),c.default.createElement("stop",{offset:"100%",stopColor:"rgba(17, 186, 250, 0.1)"}))),c.default.createElement("circle",{cx:"202",cy:"202",r:"200",fill:"rgba(0, 0, 0, 0)",stroke:"url(#linear)",strokeWidth:"4"}))),c.default.createElement("img",{className:"img3",src:"//img.alicdn.com/tfs/TB1EBu.JgHqK1RjSZJnXXbNLpXa-914-1156.png"}),c.default.createElement("img",{className:"img4",src:"//img.alicdn.com/tfs/TB115i2JmzqK1RjSZPxXXc4tVXa-186-78.png"}),c.default.createElement("img",{className:"img5",src:"//img.alicdn.com/tfs/TB115i2JmzqK1RjSZPxXXc4tVXa-186-78.png"})))),c.default.createElement("section",{className:"introduction-section"},c.default.createElement("div",{className:"introduction-body"},c.default.createElement("div",{className:"introduction"},c.default.createElement("h3",null,o.introduction.title),c.default.createElement("p",null,o.introduction.desc)),c.default.createElement("img",{src:(0,d.getLink)(o.introduction.img)}))),c.default.createElement("section",{className:"feature-section"},c.default.createElement("div",{className:"feature-container"},c.default.createElement("h3",null,o.features.title),c.default.createElement("ul",null,o.features.list.map(function(e,t){return c.default.createElement(O.default,{feature:e,key:t})})))),c.default.createElement(b.default,{logo:"/img/seata_logo_gray.png",language:n}))}}]),t}(w.default);document.getElementById("root")&&f.default.render(c.default.createElement(k,null),document.getElementById("root")),t.default=k},function(e,t){},function(e,t){}]); \ No newline at end of file +!function(){"use strict";function n(){for(var e=[],t=0;t1){if(o=e({path:"/"},r.defaults,o),"number"==typeof o.expires){var s=new Date;s.setMilliseconds(s.getMilliseconds()+864e5*o.expires),o.expires=s}o.expires=o.expires?o.expires.toUTCString():"";try{i=JSON.stringify(a),/^[\{\[]/.test(i)&&(a=i)}catch(e){}a=n.write?n.write(a,t):encodeURIComponent(String(a)).replace(/%(23|24|26|2B|3A|3C|3E|3D|2F|3F|40|5B|5D|5E|60|7B|7D|7C)/g,decodeURIComponent),t=encodeURIComponent(String(t)),t=t.replace(/%(23|24|26|2B|5E|60|7C)/g,decodeURIComponent),t=t.replace(/[\(\)]/g,escape);var l="";for(var u in o)o[u]&&(l+="; "+u,!0!==o[u]&&(l+="="+o[u]));return document.cookie=t+"="+a+l}t||(i={});for(var c=document.cookie?document.cookie.split("; "):[],f=/(%[0-9A-Z]{2})+/g,d=0;d-1?t:e}function p(e,t){t=t||{};var n=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new a(e.headers)),this.method=e.method,this.mode=e.mode,n||null==e._bodyInit||(n=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new a(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&n)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(n)}function h(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var n=e.split("="),r=n.shift().replace(/\+/g," "),a=n.join("=").replace(/\+/g," 
");t.append(decodeURIComponent(r),decodeURIComponent(a))}}),t}function m(e){var t=new a;return e.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(e){var n=e.split(":"),r=n.shift().trim();if(r){var a=n.join(":").trim();t.append(r,a)}}),t}function y(e,t){t||(t={}),this.type="default",this.status=void 0===t.status?200:t.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new a(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var g={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(g.arrayBuffer)var b=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],v=function(e){return e&&DataView.prototype.isPrototypeOf(e)},w=ArrayBuffer.isView||function(e){return e&&b.indexOf(Object.prototype.toString.call(e))>-1};a.prototype.append=function(e,r){e=t(e),r=n(r);var a=this.map[e];this.map[e]=a?a+","+r:r},a.prototype.delete=function(e){delete this.map[t(e)]},a.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},a.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},a.prototype.set=function(e,r){this.map[t(e)]=n(r)},a.prototype.forEach=function(e,t){for(var n in this.map)this.map.hasOwnProperty(n)&&e.call(t,this.map[n],n,this)},a.prototype.keys=function(){var e=[];return this.forEach(function(t,n){e.push(n)}),r(e)},a.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),r(e)},a.prototype.entries=function(){var e=[];return this.forEach(function(t,n){e.push([n,t])}),r(e)},g.iterable&&(a.prototype[Symbol.iterator]=a.prototype.entries);var E=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},f.call(p.prototype),f.call(y.prototype),y.prototype.clone=function(){return new y(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new a(this.headers),url:this.url})},y.error=function(){var e=new y(null,{status:0,statusText:""});return e.type="error",e};var _=[301,302,303,307,308];y.redirect=function(e,t){if(-1===_.indexOf(t))throw new RangeError("Invalid status code");return new y(null,{status:t,headers:{location:e}})},e.Headers=a,e.Request=p,e.Response=y,e.fetch=function(e,t){return new Promise(function(n,r){var a=new p(e,t),o=new XMLHttpRequest;o.onload=function(){var e={status:o.status,statusText:o.statusText,headers:m(o.getAllResponseHeaders()||"")};e.url="responseURL"in o?o.responseURL:e.headers.get("X-Request-URL");var t="response"in o?o.response:o.responseText;n(new y(t,e))},o.onerror=function(){r(new TypeError("Network request failed"))},o.ontimeout=function(){r(new TypeError("Network request failed"))},o.open(a.method,a.url,!0),"include"===a.credentials?o.withCredentials=!0:"omit"===a.credentials&&(o.withCredentials=!1),"responseType"in o&&g.blob&&(o.responseType="blob"),a.headers.forEach(function(e,t){o.setRequestHeader(t,e)}),o.send(void 0===a._bodyInit?null:a._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},,,,,,,,,,,,,,,,,,,,,,,,,function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={"zh-cn":{brand:{brandName:"Seata",briefIntroduction:"Seata 
是一款阿里巴巴开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。",buttons:[{text:"快速入门",link:"/zh-cn/docs/overview/what-is-seata.html",type:"primary"},{text:"Github",link:"https://github.com/seata/seata",type:"normal"}]},introduction:{title:"Seata 是什么?",desc:"Seata 是一款开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。在 Seata 开源之前,Seata 对应的内部版本在阿里经济体内部一直扮演着分布式一致性中间件的角色,帮助经济体平稳的度过历年的双11,对各BU业务进行了有力的支撑。经过多年沉淀与积累,商业化产品先后在阿里云、金融云进行售卖。2019.1 为了打造更加完善的技术生态和普惠技术成果,Seata 正式宣布对外开源,未来 Seata 将以社区共建的形式帮助其技术更加可靠与完备。",img:"https://img.alicdn.com/tfs/TB1rDpkJAvoK1RjSZPfXXXPKFXa-794-478.png"},features:{title:"特色功能",list:[{icon:"feature-1",title:"微服务框架支持",content:"目前已支持 Dubbo、Spring Cloud、Sofa-RPC、Motan 和 grpc 等RPC框架,其他框架持续集成中"},{icon:"feature-2",title:"AT 模式",content:"提供无侵入自动补偿的事务模式,目前已支持 MySQL、 Oracle 的AT模式、PostgreSQL、H2 开发中"},{icon:"feature-3",title:"TCC 模式",content:"支持 TCC 模式并可与 AT 混用,灵活度更高"},{icon:"feature-4",title:"SAGA 模式",content:"为长事务提供有效的解决方案"},{icon:"feature-5",title:"XA 模式(开发中)",content:"支持已实现 XA 接口的数据库的 XA 模式"},{icon:"feature-6",title:"高可用",content:"支持基于数据库存储的集群模式,水平扩展能力强"}]}},"en-us":{brand:{brandName:"Seata",briefIntroduction:"Seata is an Alibaba open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture.",buttons:[{text:"Get Started",link:"/en-us/docs/user/quickstart.html",type:"primary"},{text:"Github",link:"https://github.com/seata/seata",type:"normal"}]},introduction:{title:"What is Seata?",desc:"Seata is an open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture. Before the open-source of Seata, the internal version of Seata played a role of distributed consistency Middleware in Ali economy, helping the economy to survive the double 11 of the past years smoothly, and providing strong support for businesses of all departments. After years of precipitation and accumulation, commercial products have been sold in Alibaba cloud and financial cloud. 2019.1 in order to create a more complete technological ecology and inclusive technological achievements, Seata officially announced open source to the outside world. 
In the future, Seata will help its technology become more reliable and complete in the form of community building.",img:"https://img.alicdn.com/tfs/TB1rDpkJAvoK1RjSZPfXXXPKFXa-794-478.png"},features:{title:"Feature List",list:[{icon:"feature-1",title:"Microservices Framework Support",content:"RPC frameworks such as Dubbo, Spring Cloud, Sofa-RPC, Motan, and grpc are currently supported, and other frameworks are continuously integrated."},{icon:"feature-2",title:"AT mode",content:"Provides non-intrusive automatic compensation transaction mode, currently supports MySQL, Oracle's AT mode, PostgreSQL, In developing the H2."},{icon:"feature-3",title:"TCC mode",content:"Support TCC mode and mix with AT for greater flexibility."},{icon:"feature-4",title:"SAGA mode",content:"Provide an effective solution for long transactions."},{icon:"feature-5",title:"XA mode (under development)",content:"Support for XA schemas for databases that have implemented XA interfaces."},{icon:"feature-6",title:"High availability",content:"Support cluster mode based on database storage, strong horizontal scalability."}]}}}},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}Object.defineProperty(t,"__esModule",{value:!0});var o=n(0),i=r(o),s=n(2),l=r(s),u=n(5),c=r(u),f=n(1);n(64);var d={type:l.default.oneOf(["primary","normal"]),link:l.default.string,target:l.default.string},p={type:"primary",link:"",target:"_self"},h=function(e){return i.default.createElement("a",{className:(0,c.default)(a({button:!0},"button-"+e.type,!0)),target:e.target||"_self",href:(0,f.getLink)(e.link)},e.children)};h.propTypes=d,h.defaultProps=p,t.default=h},,,,,,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0});var a=n(0),o=r(a),i=n(57),s=r(i),l=function(e){var t=e.feature;return o.default.createElement("li",null,o.default.createElement(s.default,{type:t.icon}),o.default.createElement("div",null,o.default.createElement("h4",null,t.title),o.default.createElement("p",null,t.content)))};t.default=l},,,,,function(e,t){},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}Object.defineProperty(t,"__esModule",{value:!0});var o=n(0),i=r(o),s=n(2),l=r(s),u=n(5),c=r(u);n(65);var f={type:l.default.string.isRequired},d=function(e){var t=e.type;return i.default.createElement("i",{className:(0,c.default)(a({"docsite-icon":!0},"docsite-icon-"+t,!0))})};d.propTypes=f,t.default=d},,,,,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function a(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function o(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}Object.defineProperty(t,"__esModule",{value:!0});var s=function(){function e(e,t){for(var 
n=0;n66?e.setState({headerType:"normal"}):e.setState({headerType:"primary"})}),fetch("//api.github.com/repos/seata/seata").then(function(e){return e.json()}).then(function(t){e.setState({starCount:t.stargazers_count,forkCount:t.forks_count})})}},{key:"render",value:function(){var e=this.state,t=e.starCount,n=e.forkCount,r=this.getLanguage(),a=x.default[r],o=this.state.headerType,i="primary"===o?"/img/seata_logo_white.png":"/img/seata_logo.png";return u.default.createElement("div",{className:"home-page"},u.default.createElement("section",{className:"top-section"},u.default.createElement(h.default,{currentKey:"home",type:o,logo:i,language:r,onLanguageChange:this.onLanguageChange}),u.default.createElement("div",{className:"top-body"},u.default.createElement("div",{className:"vertical-middle"},u.default.createElement("div",{className:"product-name"},u.default.createElement("h2",null,a.brand.brandName)),u.default.createElement("p",{className:"product-desc"},a.brand.briefIntroduction),u.default.createElement("div",{className:"button-area"},a.brand.buttons.map(function(e){return u.default.createElement(y.default,{type:e.type,key:e.type,link:e.link,target:e.target},e.text)})),u.default.createElement("div",{className:"github-buttons"},u.default.createElement("a",{href:"https://github.com/seata/seata",target:"_blank",rel:"noopener noreferrer"},u.default.createElement("div",{className:"star"},u.default.createElement("img",{src:"https://img.alicdn.com/tfs/TB1FlB1JwHqK1RjSZFPXXcwapXa-32-32.png"}),u.default.createElement("span",{className:"type"},"Star"),u.default.createElement("span",{className:"line"}),u.default.createElement("span",{className:"count"},t))),u.default.createElement("a",{href:"https://github.com/seata/seata/fork",target:"_blank",rel:"noopener noreferrer"},u.default.createElement("div",{className:"fork"},u.default.createElement("img",{src:"https://img.alicdn.com/tfs/TB1zbxSJwDqK1RjSZSyXXaxEVXa-32-32.png"}),u.default.createElement("span",{className:"type"},"Fork"),u.default.createElement("span",{className:"line"}),u.default.createElement("span",{className:"count"},n))))),u.default.createElement("div",{className:"animation"},u.default.createElement("img",{className:"img1",src:"//img.alicdn.com/tfs/TB1evnpJhnaK1RjSZFBXXcW7VXa-702-312.png"}),u.default.createElement("img",{className:"img2",src:"//img.alicdn.com/tfs/TB1iau9JcbpK1RjSZFyXXX_qFXa-914-1156.png"}),u.default.createElement("div",{className:"outer-circle"}),u.default.createElement("div",{className:"rotate-circle"},u.default.createElement("svg",{viewBox:"0 0 404 404",xmlns:"http://www.w3.org/2000/svg"},u.default.createElement("defs",null,u.default.createElement("linearGradient",{id:"linear",x1:"0%",y1:"0%",x2:"100%",y2:"0%"},u.default.createElement("stop",{offset:"0%",stopColor:"rgba(17, 186, 250, 1)"}),u.default.createElement("stop",{offset:"50%",stopColor:"rgba(17, 186, 250, 0.1)"}),u.default.createElement("stop",{offset:"50%",stopColor:"rgba(17, 186, 250, 1)"}),u.default.createElement("stop",{offset:"100%",stopColor:"rgba(17, 186, 250, 0.1)"}))),u.default.createElement("circle",{cx:"202",cy:"202",r:"200",fill:"rgba(0, 0, 0, 
0)",stroke:"url(#linear)",strokeWidth:"4"}))),u.default.createElement("img",{className:"img3",src:"//img.alicdn.com/tfs/TB1EBu.JgHqK1RjSZJnXXbNLpXa-914-1156.png"}),u.default.createElement("img",{className:"img4",src:"//img.alicdn.com/tfs/TB115i2JmzqK1RjSZPxXXc4tVXa-186-78.png"}),u.default.createElement("img",{className:"img5",src:"//img.alicdn.com/tfs/TB115i2JmzqK1RjSZPxXXc4tVXa-186-78.png"})))),u.default.createElement("section",{className:"introduction-section"},u.default.createElement("div",{className:"introduction-body"},u.default.createElement("div",{className:"introduction"},u.default.createElement("h3",null,a.introduction.title),u.default.createElement("p",null,a.introduction.desc)),u.default.createElement("img",{src:(0,d.getLink)(a.introduction.img)}))),u.default.createElement("section",{className:"feature-section"},u.default.createElement("div",{className:"feature-container"},u.default.createElement("h3",null,a.features.title),u.default.createElement("ul",null,a.features.list.map(function(e,t){return u.default.createElement(_.default,{feature:e,key:t})})))),u.default.createElement(b.default,{logo:"/img/seata_logo_gray.png",language:r}))}}]),t}(w.default);document.getElementById("root")&&f.default.render(u.default.createElement(S,null),document.getElementById("root")),t.default=S},function(e,t){},function(e,t){}]); \ No newline at end of file diff --git a/chinaz_verify_F15FA76EB8413EC9.html b/chinaz_verify_F15FA76EB8413EC9.html deleted file mode 100644 index 8d8293a3..00000000 --- a/chinaz_verify_F15FA76EB8413EC9.html +++ /dev/null @@ -1 +0,0 @@ -F15FA76EB8413EC9 \ No newline at end of file diff --git a/en-us/blog/download.html b/en-us/blog/download.html deleted file mode 100644 index 9f5eff17..00000000 --- a/en-us/blog/download.html +++ /dev/null @@ -1,334 +0,0 @@ - - - - - - - - - - Downloads - - - - -

Downloads

-

Seata

-
-

GitHub: https://github.com/seata/seata
-Release Notes: https://github.com/seata/seata/releases

-
-

0.9.0 (2019-10-16)

-

source | -binary

-
- Release notes -

Seata 0.9.0

-

Seata 0.9.0 Released.

-

Seata is an easy-to-use, high-performance, open source distributed transaction solution.

-

The changes in this version are as follows:

-

feature:

-
    -
  • [#1608] Saga implementation based on state machine
  • -
  • [#1625] support custom config and registry type
  • -
  • [#1656] support spring cloud config
  • -
  • [#1689] support -e startup parameter for specifying the environment name
  • -
  • [#1739] support retry when tm commit or rollback failed
  • -
-

bugfix:

-
    -
  • [#1605] fix deadlocks that can be caused by object locks and global locks and optimize the granularity of locks
  • -
  • [#1685] fix pk too long in lock table on db mode and optimize error log
  • -
  • [#1691] fix can't access private member of DruidDataSourceWrapper
  • -
  • [#1699] fix use 'in' and 'between' in where condition for Oracle and Mysql
  • -
  • [#1713] fix LockManagerTest.concurrentUseAbilityTest assertion condition
  • -
  • [#1720] fix can't refresh table meta data for oracle
  • -
  • [#1729] fix oracle batch insert error
  • -
  • [#1735] clean xid when tm commit or rollback failed
  • -
  • [#1749] fix undo support oracle table meta cache
  • -
  • [#1751] fix memory lock is not released due to hash conflict
  • -
  • [#1761] fix oracle rollback failed when the table has null Blob Clob value
  • -
  • [#1759] fix saga service method not support interface type parameter
  • -
  • [#1401] fix the first registration resource is null when RM starts
  • -
-

-

optimize:

-
    -
  • [#1701] remove unused imports
  • -
  • [#1705] Based on Java5 optimization
  • -
  • [#1706] optimize inner class to static class
  • -
  • [#1707] default charset use StandardCharsets.UTF_8 instead
  • -
  • [#1712] abstract undolog manager class
  • -
  • [#1722] simplify to make codes more readable
  • -
  • [#1726] format log messages
  • -
  • [#1738] add server's jvm parameters
  • -
  • [#1743] improve the efficiency of the batch log
  • -
  • [#1747] use raw types instead of boxing types
  • -
  • [#1750] abstract tableMeta cache class
  • -
  • [#1755] enhance test coverage of seata-common module
  • -
  • [#1756] security: upgrade jackson to avoid security vulnerabilities
  • -
  • [#1657] optimize the problem of large direct buffer when file rolling in file storage mode
  • -
-


-Thanks to these contributors for their code commits. Please report any unintended omission.

- -

Also, we received many valuable issues, questions and advice from our community. Thank you all.

-

Link

- -
-

0.8.1 (2019-09-18)

-

source | -binary

-
- Release notes -

Seata 0.8.1

-

Seata 0.8.1 Released.

-

Seata is an easy-to-use, high-performance, open source distributed transaction solution.

-

The changes in this version are as follows:

-

feature:

-
    -
  • [#1598] support profile to use absolute path
  • -
  • [#1617] support making the profile's (registry.conf) name configurable
  • -
  • [#1418] support undo_log kryo serializer
  • -
  • [#1489] support protobuf maven plugin
  • -
  • [#1437] support kryo codec
  • -
  • [#1478] support db mock
  • -
  • [#1512] extended support for mysql and oracle multiple insert batch syntax
  • -
  • [#1496] support auto proxy of DataSource
  • -
-

bugfix:

-
    -
  • [#1646] fix selectForUpdate lockQuery exception in file mode
  • -
  • [#1572] fix get tablemeta fail in oracle when table name was lower case
  • -
  • [#1663] fix get tablemeta fail when table name was keyword
  • -
  • [#1666] fix restore connection's autocommit
  • -
  • [#1643] fix serialize and deserialize in java.sql.Blob, java.sql.Clob
  • -
  • [#1628] fix oracle support ROWNUM query
  • -
  • [#1552] fix BufferOverflow when BranchSession size too large
  • -
  • [#1609] fix thread unsafe of oracle keyword checker
  • -
  • [#1599] fix thread unsafe of mysql keyword checker
  • -
  • [#1607] fix NoSuchMethodError when the version of druid used < 1.1.3
  • -
  • [#1581] fix missing some length in GlobalSession and FileTransactionStoreManager
  • -
  • [#1594] fix nacos's default namespace
  • -
  • [#1550] fix calculate BranchSession size missing xidBytes.length
  • -
  • [#1558] fix NPE when the rpcMessage's body is null
  • -
  • [#1505] fix bind public network address listen failed
  • -
  • [#1539] fix nacos namespace setting does not take effect
  • -
  • [#1537] fix nacos-config.txt missing store.db.driver-class-name property
  • -
  • [#1522] fix ProtocolV1CodecTest testAll may be appears test not pass
  • -
  • [#1525] fix when getAfterImage error, trx autocommit
  • -
  • [#1518] fix EnhancedServiceLoader may be appears load class error
  • -
  • [#1514] fix when lack serialization dependence can't generate undolog and report true
  • -
  • [#1445] fix DefaultCoordinatorMetricsTest UT failed
  • -
  • [#1481] fix TableMetaCache refresh problem in multiple datasource
  • -
-

optimize:

-
    -
  • [#1629] optimize the watcher efficiency of etcd3
  • -
  • [#1661] optimize global_table insert transaction_name size
  • -
  • [#1633] optimize branch transaction repeated reporting false
  • -
  • [#1654] optimize wrong usage of slf4j
  • -
  • [#1593] optimize and standardize server log
  • -
  • [#1648] optimize transaction_name length when building the table
  • -
  • [#1576] eliminate the impact of instructions reordering on session async committing task
  • -
  • [#1618] optimize undolog manager and fix delete undolog support oracle
  • -
  • [#1469] reduce the number of lock conflict exception
  • -
  • [#1619] replace StringBuffer with StringBuilder
  • -
  • [#1580] optimize LockKeyConflictException and change register method
  • -
  • [#1574] optimize once delete GlobalSession locks for db mode when commit success
  • -
  • [#1601] optimize typo
  • -
  • [#1602] upgrade fastjson version to 1.2.60 for security issue
  • -
  • [#1583] optimize get oracle primary index
  • -
  • [#1575] add UT for RegisterTMRequest
  • -
  • [#1559] optimize delay to delete the expired undo log
  • -
  • [#1547] TableRecords delete jackson annotation
  • -
  • [#1542] optimize AbstractSessionManager debug log
  • -
  • [#1535] remove H2 and pgsql get primary index code and close resultSet
  • -
  • [#1541] code clean
  • -
  • [#1544] remove Chinese comment
  • -
  • [#1533] refactor of the logics of Multi-configuration Isolation
  • -
  • [#1493] add table meta checker switch
  • -
  • [#1530] throw Exception when no index in the table
  • -
  • [#1444] simplify operation of map
  • -
  • [#1497] add seata-all dependencies
  • -
  • [#1490] remove unnecessary code
  • -
-

Thanks to these contributors for their code commits. Please report any unintended omission.

- -

Also, we received many valuable issues, questions and advice from our community. Thank you all.

-

Link

- -
-

0.8.0 (2019-08-16)

- -

0.7.1 (2019-07-15)

- -

0.7.0 (2019-07-12)

- -

0.6.1 (2019-05-31)

- -

0.6.0 (2019-05-24)

- -

0.5.2 (2019-05-17)

- -

0.5.1 (2019-04-30)

- -

0.5.0 (2019-04-19)

- -

0.4.2 (2019-04-12)

- -

0.4.1 (2019-03-29)

- -

0.4.0 (2019-03-19)

- -

0.3.1 (2019-03-15)

- -

0.3.0 (2019-03-08)

- -

0.2.3 (2019-03-02)

- -

0.2.2 (2019-02-22)

- -

0.2.1 (2019-02-18)

- -

0.2.0 (2019-02-14)

- -

0.1.4 (2019-02-11)

- -

0.1.3 (2019-01-29)

- -

0.1.2 (2019-01-25)

- -

0.1.1 (2019-01-18)

- -

0.1.0 (2019-01-09)

- -
- - - - - - - diff --git a/en-us/blog/download.json b/en-us/blog/download.json deleted file mode 100644 index 1de7da18..00000000 --- a/en-us/blog/download.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "download.md", - "__html": "

Downloads

\n

Seata

\n
\n

GitHub: https://github.com/seata/seata
\nRelease Notes: https://github.com/seata/seata/releases

\n
\n

0.9.0 (2019-10-16)

\n

source |\nbinary

\n
\n Release notes\n

Seata 0.9.0

\n

Seata 0.9.0 Released.

\n

Seata is an easy-to-use, high-performance, open source distributed transaction solution.

\n

The changes in this version are as follows:

\n

feature:

\n
    \n
  • [#1608] Saga implementation based on state machine
  • \n
  • [#1625] support custom config and registry type
  • \n
  • [#1656] support spring cloud config
  • \n
  • [#1689] support -e startup parameter for specifying the environment name
  • \n
  • [#1739] support retry when tm commit or rollback failed
  • \n
\n

bugfix:

\n
    \n
  • [#1605] fix deadlocks that can be caused by object locks and global locks and optimize the granularity of locks
  • \n
  • [#1685] fix pk too long in lock table on db mode and optimize error log
  • \n
  • [#1691] fix can't access private member of DruidDataSourceWrapper
  • \n
  • [#1699] fix use 'in' and 'between' in where condition for Oracle and Mysql
  • \n
  • [#1713] fix LockManagerTest.concurrentUseAbilityTest assertion condition
  • \n
  • [#1720] fix can't refresh table meta data for oracle
  • \n
  • [#1729] fix oracle batch insert error
  • \n
  • [#1735] clean xid when tm commit or rollback failed
  • \n
  • [#1749] fix undo support oracle table meta cache
  • \n
  • [#1751] fix memory lock is not released due to hash conflict
  • \n
  • [#1761] fix oracle rollback failed when the table has null Blob Clob value
  • \n
  • [#1759] fix saga service method not support interface type parameter
  • \n
  • [#1401] fix the first registration resource is null when RM starts
  • \n
\n

\n

optimize:

\n
    \n
  • [#1701] remove unused imports
  • \n
  • [#1705] Based on Java5 optimization
  • \n
  • [#1706] optimize inner class to static class
  • \n
  • [#1707] default charset use StandardCharsets.UTF_8 instead
  • \n
  • [#1712] abstract undolog manager class
  • \n
  • [#1722] simplify to make codes more readable
  • \n
  • [#1726] format log messages
  • \n
  • [#1738] add server's jvm parameters
  • \n
  • [#1743] improve the efficiency of the batch log
  • \n
  • [#1747] use raw types instead of boxing types
  • \n
  • [#1750] abstract tableMeta cache class
  • \n
  • [#1755] enhance test coverage of seata-common module
  • \n
  • [#1756] security: upgrade jackson to avoid security vulnerabilities
  • \n
  • [#1657] optimize the problem of large direct buffer when file rolling in file storage mode
  • \n
\n


\nThanks to these contributors for their code commits. Please report an unintended omission.
\n​

\n\n

Also, we received many valuable issues, questions and advice from our community. Thank you all.

\n

Link

\n\n
\n

0.8.1 (2019-09-18)

\n

source |\nbinary

\n
\n Release notes\n

Seata 0.8.1

\n

Seata 0.8.1 Released.

\n

Seata is an easy-to-use, high-performance, open source distributed transaction solution.

\n

The changes in this version are as follows:

\n

feature:

\n
    \n
  • [#1598] support profile to use absolute path
  • \n
  • [#1617] support making the profile's (registry.conf) name configurable
  • \n
  • [#1418] support undo_log kryo serializer
  • \n
  • [#1489] support protobuf maven plugin
  • \n
  • [#1437] support kryo codec
  • \n
  • [#1478] support db mock
  • \n
  • [#1512] extended support for mysql and oracle multiple insert batch syntax
  • \n
  • [#1496] support auto proxy of DataSource
  • \n
\n

bugfix:

\n
    \n
  • [#1646] fix selectForUpdate lockQuery exception in file mode
  • \n
  • [#1572] fix get tablemeta fail in oracle when table name was lower case
  • \n
  • [#1663] fix get tablemeta fail when table name was keyword
  • \n
  • [#1666] fix restore connection's autocommit
  • \n
  • [#1643] fix serialize and deserialize in java.sql.Blob, java.sql.Clob
  • \n
  • [#1628] fix oracle support ROWNUM query
  • \n
  • [#1552] fix BufferOverflow when BranchSession size too large
  • \n
  • [#1609] fix thread unsafe of oracle keyword checker
  • \n
  • [#1599] fix thread unsafe of mysql keyword checker
  • \n
  • [#1607] fix NoSuchMethodError when the version of druid used < 1.1.3
  • \n
  • [#1581] fix missing some length in GlobalSession and FileTransactionStoreManager
  • \n
  • [#1594] fix nacos's default namespace
  • \n
  • [#1550] fix calculate BranchSession size missing xidBytes.length
  • \n
  • [#1558] fix NPE when the rpcMessage's body is null
  • \n
  • [#1505] fix bind public network address listen failed
  • \n
  • [#1539] fix nacos namespace setting does not take effect
  • \n
  • [#1537] fix nacos-config.txt missing store.db.driver-class-name property
  • \n
  • [#1522] fix ProtocolV1CodecTest testAll may be appears test not pass
  • \n
  • [#1525] fix when getAfterImage error, trx autocommit
  • \n
  • [#1518] fix EnhancedServiceLoader may be appears load class error
  • \n
  • [#1514] fix when lack serialization dependence can't generate undolog and report true
  • \n
  • [#1445] fix DefaultCoordinatorMetricsTest UT failed
  • \n
  • [#1481] fix TableMetaCache refresh problem in multiple datasource
  • \n
\n

optimize:

\n
    \n
  • [#1629] optimize the watcher efficiency of etcd3
  • \n
  • [#1661] optimize global_table insert transaction_name size
  • \n
  • [#1633] optimize branch transaction repeated reporting false
  • \n
  • [#1654] optimize wrong usage of slf4j
  • \n
  • [#1593] optimize and standardize server log
  • \n
  • [#1648] optimize transaction_name length when building the table
  • \n
  • [#1576] eliminate the impact of instructions reordering on session async committing task
  • \n
  • [#1618] optimize undolog manager and fix delete undolog support oracle
  • \n
  • [#1469] reduce the number of lock conflict exception
  • \n
  • [#1619] replace StringBuffer with StringBuilder
  • \n
  • [#1580] optimize LockKeyConflictException and change register method
  • \n
  • [#1574] optimize once delete GlobalSession locks for db mode when commit success
  • \n
  • [#1601] optimize typo
  • \n
  • [#1602] upgrade fastjson version to 1.2.60 for security issue
  • \n
  • [#1583] optimize get oracle primary index
  • \n
  • [#1575] add UT for RegisterTMRequest
  • \n
  • [#1559] optimize delay to delete the expired undo log
  • \n
  • [#1547] TableRecords delete jackson annotation
  • \n
  • [#1542] optimize AbstractSessionManager debug log
  • \n
  • [#1535] remove H2 and pgsql get primary index code and close resultSet
  • \n
  • [#1541] code clean
  • \n
  • [#1544] remove Chinese comment
  • \n
  • [#1533] refactor of the logics of Multi-configuration Isolation
  • \n
  • [#1493] add table meta checker switch
  • \n
  • [#1530] throw Exception when no index in the table
  • \n
  • [#1444] simplify operation of map
  • \n
  • [#1497] add seata-all dependencies
  • \n
  • [#1490] remove unnecessary code
  • \n
\n

Thanks to these contributors for their code commits. Please report any unintended omission.

\n\n

Also, we received many valuable issues, questions and advice from our community. Thank you all.

\n

Link

\n\n
\n

0.8.0 (2019-08-16)

\n\n

0.7.1 (2019-07-15)

\n\n

0.7.0 (2019-07-12)

\n\n

0.6.1 (2019-05-31)

\n\n

0.6.0 (2019-05-24)

\n\n

0.5.2 (2019-05-17)

\n\n

0.5.1 (2019-04-30)

\n\n

0.5.0 (2019-04-19)

\n\n

0.4.2 (2019-04-12)

\n\n

0.4.1 (2019-03-29)

\n\n

0.4.0 (2019-03-19)

\n\n

0.3.1 (2019-03-15)

\n\n

0.3.0 (2019-03-08)

\n\n

0.2.3 (2019-03-02)

\n\n

0.2.2 (2019-02-22)

\n\n

0.2.1 (2019-02-18)

\n\n

0.2.0 (2019-02-14)

\n\n

0.1.4 (2019-02-11)

\n\n

0.1.3 (2019-01-29)

\n\n

0.1.2 (2019-01-25)

\n\n

0.1.1 (2019-01-18)

\n\n

0.1.0 (2019-01-09)

\n\n", - "link": "/en-us/blog/download.html", - "meta": { - "title": "Downloads", - "keywords": "Seata, Downloads, Version", - "description": "This article will introduce you how to understand the details of each version and upgrade matters needing attention." - } -} \ No newline at end of file diff --git a/en-us/blog/dubbo-seata.html b/en-us/blog/dubbo-seata.html deleted file mode 100644 index e291e5aa..00000000 --- a/en-us/blog/dubbo-seata.html +++ /dev/null @@ -1,205 +0,0 @@ - - - - - - - - - - How to use Seata to ensure consistency between Dubbo Microservices - - - - -

How to use Seata to ensure consistency between Dubbo Microservices

-

Use case

-

A business scenario where a user purchases commodities. The whole business logic is powered by 3 microservices:

-
    -
  • Storage service: deduct storage count on given commodity.
  • -
  • Order service: create order according to purchase request.
  • -
  • Account service: debit the balance of user's account.
  • -
-

Architecture

-

Architecture

-

StorageService

-
public interface StorageService {
-
-    /**
-     * deduct storage count
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

OrderService

-
public interface OrderService {
-
-    /**
-     * create order
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

AccountService

-
public interface AccountService {
-
-    /**
-     * debit balance of user's account
-     */
-    void debit(String userId, int money);
-}
-
-

Main business logic

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * purchase
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class StorageServiceImpl implements StorageService {
-
-  private StorageDAO storageDAO;
-  
-    @Override
-    public void deduct(String commodityCode, int count) {
-        Storage storage = new Storage();
-        storage.setCount(count);
-        storage.setCommodityCode(commodityCode);
-        storageDAO.update(storage);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        return orderDAO.insert(order);
-    }
-}
-
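An implementation of AccountService is not shown above. A minimal sketch that is consistent with the AccountService interface (the AccountDAO type and its methods are illustrative, not part of the sample code) could look like this:

public class AccountServiceImpl implements AccountService {

    // illustrative DAO; the sample project may name or structure this differently
    private AccountDAO accountDAO;

    @Override
    public void debit(String userId, int money) {
        // load the account, subtract the purchase amount, and persist the new balance
        Account account = accountDAO.findByUserId(userId);
        account.setMoney(account.getMoney() - money);
        accountDAO.update(account);
    }
}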
-

Distributed Transaction Solution with Seata

-


-

We just need to add the @GlobalTransactional annotation to the business method:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-
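For reference, a slightly fuller sketch of the transaction initiator with the annotation applied is shown below. The name and timeoutMills values are illustrative choices, not taken from the sample project, and storageService/orderService are assumed to be injected Dubbo references as in the earlier BusinessServiceImpl:

import io.seata.spring.annotation.GlobalTransactional;

public class BusinessServiceImpl implements BusinessService {

    private StorageService storageService;

    private OrderService orderService;

    /**
     * purchase: the calls to storage and order (and, through order, account)
     * all join the global transaction started here.
     */
    @GlobalTransactional(name = "dubbo-demo-purchase", timeoutMills = 60000)
    public void purchase(String userId, String commodityCode, int orderCount) {
        storageService.deduct(commodityCode, orderCount);
        orderService.create(userId, commodityCode, orderCount);
        // if any branch throws, Seata rolls back every branch of the global transaction
    }
}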

Example powered by Dubbo + Seata

-

Step 1: Setup database

-
    -
  • Requirement: MySQL with InnoDB engine.
  • -
-

Note: In fact, there should be 3 databases for the 3 services in this example use case. However, for simplicity we can create just one database and configure 3 data sources.

-

Modify the Spring XML files with the URL/username/password of the database you just created.

-

dubbo-account-service.xml -dubbo-order-service.xml -dubbo-storage-service.xml

-
    <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-    <property name="username" value="xxx" />
-    <property name="password" value="xxx" />
-
-
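In AT mode the business DataSource should also be wrapped by Seata's DataSourceProxy so that SQL can be intercepted and undo-log records written. A minimal Java sketch of that wrapping (class and method names are illustrative; Druid is assumed as the connection pool, and the URL/username/password placeholders mirror the XML above):

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;

public class DataSourceHolder {

    // Wrap the plain pool with Seata's proxy; DAOs/ORM should use the returned DataSource.
    public static DataSource seataDataSource() {
        DruidDataSource druid = new DruidDataSource();
        druid.setUrl("jdbc:mysql://x.x.x.x:3306/xxx"); // placeholders, as in the XML above
        druid.setUsername("xxx");
        druid.setPassword("xxx");
        return new DataSourceProxy(druid);
    }
}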

Step 2: Create UNDO_LOG table for Seata

-

The UNDO_LOG table is required by Seata AT mode; it stores the rollback information for branch transactions.

-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  KEY `idx_unionkey` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8
-
-

Step 3: Create tables for example business

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

Step 4: Start Seata-Server

-
    -
  • Download server package, unzip it.
  • -
  • Start Seata-Server
  • -
-
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
-
-e.g.
-
-sh seata-server.sh 8091 /home/admin/seata/data/
-
-

Step 5: Run example

- -

Related projects

- -
- - - - - - diff --git a/en-us/blog/dubbo-seata.json b/en-us/blog/dubbo-seata.json deleted file mode 100644 index 00b784ec..00000000 --- a/en-us/blog/dubbo-seata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "dubbo-seata.md", - "__html": "

How to use Seata to ensure consistency between Dubbo Microservices

\n

Use case

\n

A business logic for user purchasing commodities. The whole business logic is powered by 3 microservices:

\n
    \n
  • Storage service: deduct storage count on given commodity.
  • \n
  • Order service: create order according to purchase request.
  • \n
  • Account service: debit the balance of user's account.
  • \n
\n

Architecture

\n

\"Architecture\"

\n

StorageService

\n
public interface StorageService {\n\n    /**\n     * deduct storage count\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

OrderService

\n
public interface OrderService {\n\n    /**\n     * create order\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

AccountService

\n
public interface AccountService {\n\n    /**\n     * debit balance of user's account\n     */\n    void debit(String userId, int money);\n}\n
\n

Main business logic

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * purchase\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class StorageServiceImpl implements StorageService {\n\n  private StorageDAO storageDAO;\n  \n    @Override\n    public void deduct(String commodityCode, int count) {\n        Storage storage = new Storage();\n        storage.setCount(count);\n        storage.setCommodityCode(commodityCode);\n        storageDAO.update(storage);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        return orderDAO.insert(order);\n    }\n}\n
\n

Distributed Transaction Solution with Seata

\n

\"undefined\"

\n

We just need an annotation @GlobalTransactional on business method:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

Example powered by Dubbo + Seata

\n

Step 1: Setup database

\n
    \n
  • Requirement: MySQL with InnoDB engine.
  • \n
\n

Note: In fact, there should be 3 database for the 3 services in the example use case. However, we can just create one database and configure 3 data sources for simple.

\n

Modify Spring XML with the database URL/username/password you just created.

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
    <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n    <property name=\"username\" value=\"xxx\" />\n    <property name=\"password\" value=\"xxx\" />\n
\n

Step 2: Create UNDO_LOG table for Seata

\n

UNDO_LOG table is required by Seata AT mode.

\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  KEY `idx_unionkey` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8\n
\n

Step 3: Create tables for example business

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

Step 4: Start Seata-Server

\n
    \n
  • Download server package, unzip it.
  • \n
  • Start Seata-Server
  • \n
\n
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA\n\ne.g.\n\nsh seata-server.sh 8091 /home/admin/seata/data/\n
\n

Step 5: Run example

\n\n

Related projects

\n\n", - "link": "/en-us/blog/dubbo-seata.html", - "meta": { - "title": "How to use Seata to ensure consistency between Dubbo Microservices", - "keywords": "Dubbo,Seata,Consistency", - "description": "This article will introduce you how to use Seata to ensure consistency between Dubbo Microservices.", - "author": "slievrly", - "date": "2019-03-07" - } -} \ No newline at end of file diff --git a/en-us/blog/index.html b/en-us/blog/index.html deleted file mode 100644 index 0654b65a..00000000 --- a/en-us/blog/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - Blog - - - - - - - - - - - - diff --git a/en-us/blog/manual-transaction-mode.html b/en-us/blog/manual-transaction-mode.html deleted file mode 100644 index 1cea1137..00000000 --- a/en-us/blog/manual-transaction-mode.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - MT mode - - - - -

Manual Transaction Mode

-

Recall the description in the overview: a distributed global transaction is, as a whole, a two-phase commit model. A global transaction consists of several branch transactions that meet the requirements of the two-phase commit model, which means each branch transaction must have its own:

-
    -
  • One phase prepare behavior
  • -
  • Two phase commit or rollback behavior
  • -
-

Overview of a global transaction

-

According to the two-phase behavior pattern, we divide branch transactions into Automatic (Branch) Transaction Mode and Manual (Branch) Transaction Mode.

-

The AT mode (reference link TBD) is based on a relational database that supports local ACID transactions:

-
    -
  • One phase prepare behavior: In the local transaction, the business data update and the corresponding rollback log record are submitted together.
  • -
  • Two phase commit behavior: completes successfully immediately; the rollback log is cleaned up automatically, asynchronously and in batches.
  • -
  • Two phase rollback behavior: a compensation operation is generated automatically from the rollback log to complete the data rollback.
  • -
-

Accordingly, the MT mode does not rely on transaction support from the underlying data resources:

-
    -
  • One phase prepare behavior: call the custom prepare logic.
  • -
  • Two phase commit behavior: call the custom commit logic.
  • -
  • Two phase rollback behavior: call the custom rollback logic.
  • -
-

Put simply, the MT mode brings custom branch transactions under the management of the global transaction; a minimal sketch of such a branch is shown below.

-
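To make this concrete, here is a purely illustrative sketch of what a custom MT branch has to provide; the interface and method names are invented for this example and are not Seata's actual API.

/**
 * Illustrative-only shape of an MT branch: the business code supplies its own
 * prepare/commit/rollback logic instead of relying on database undo logs.
 */
public interface ManualBranchResource {

    /** Phase one: reserve the resource (e.g. freeze balance, hold stock). */
    boolean prepare(String xid, long branchId);

    /** Phase two, success path: make the reservation permanent. */
    boolean commit(String xid, long branchId);

    /** Phase two, failure path: release the reservation. */
    boolean rollback(String xid, long branchId);
}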
- - - - - - - diff --git a/en-us/blog/manual-transaction-mode.json b/en-us/blog/manual-transaction-mode.json deleted file mode 100644 index ebbfc070..00000000 --- a/en-us/blog/manual-transaction-mode.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "manual-transaction-mode.md", - "__html": "

Manual Transaction Mode

\n

Review the description in the overview: a distributed global transaction, the whole is a model of the two-phase commit. A global transaction consists of several branch transactions that meet the model requirements of the two-phase commit, which requires each branch transaction to have its own:

\n
    \n
  • One phase prepare behavior
  • \n
  • Two phase commit or rollback behavior
  • \n
\n

\"Overview

\n

According to the two phase behavior pattern,We divide the branch transaction into Automatic (Branch) Transaction Mode and Manual (Branch) Transaction Mode.

\n

The AT mode(Reference Link TBD)is based on the Relational Database that supports local ACID transactions

\n
    \n
  • One phase prepare behavior: In the local transaction, the business data update and the corresponding rollback log record are submitted together.
  • \n
  • Two phase commit behavior: Immediately ended successfully, Auto asynchronous batch cleanup of the rollback log.
  • \n
  • Two phase rollback behavior: By rolling back the log, automatic generates a compensation operation to complete the data rollback.
  • \n
\n

Accordingly, the MT mode does not rely on transaction support for the underlying data resources:

\n
    \n
  • One phase prepare behavior: Call the prepare logic of custom .
  • \n
  • Two phase commit behavior:Call the commit logic of custom .
  • \n
  • Two phase rollback behavior:Call the rollback logic of custom .
  • \n
\n

The so-called MT mode refers to the support of the branch transaction of custom into the management of global transactions.

\n", - "link": "/en-us/blog/manual-transaction-mode.html", - "meta": { - "title": "MT mode", - "keywords": "MT mode", - "description": "introduce MT mode", - "author": "kmmshmily", - "date": "2019-02-13" - } -} \ No newline at end of file diff --git a/en-us/blog/quick-start-use-seata-and-dubbo-services.html b/en-us/blog/quick-start-use-seata-and-dubbo-services.html deleted file mode 100644 index 9d0dfbce..00000000 --- a/en-us/blog/quick-start-use-seata-and-dubbo-services.html +++ /dev/null @@ -1,214 +0,0 @@ - - - - - - - - - - How to use Seata to ensure consistency between Dubbo Microservices - - - - -

How to use Seata to ensure consistency between Dubbo Microservices

-

Use case

-

A business logic for user purchasing commodities. The whole business logic is powered by 3 microservices:

-
    -
  • Storage service: deduct storage count on given commodity.
  • -
  • Order service: create order according to purchase request.
  • -
  • Account service: debit the balance of user's account.
  • -
-

Architecture

-

Architecture

-

StorageService

-
public interface StorageService {
-
-    /**
-     * deduct storage count
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

OrderService

-
public interface OrderService {
-
-    /**
-     * create order
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

AccountService

-
public interface AccountService {
-
-    /**
-     * debit balance of user's account
-     */
-    void debit(String userId, int money);
-}
-
-

Main business logic

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * purchase
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class StorageServiceImpl implements StorageService {
-
-  private StorageDAO storageDAO;
-  
-    @Override
-    public void deduct(String commodityCode, int count) {
-        Storage storage = new Storage();
-        storage.setCount(count);
-        storage.setCommodityCode(commodityCode);
-        storageDAO.update(storage);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        return orderDAO.insert(order);
-    }
-}
-
-

Distributed Transaction Solution with Seata

-

[Figure: Distributed Transaction Solution with Seata]

-

We just need the @GlobalTransactional annotation on the business method:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-

Example powered by Dubbo + Seata

-

Step 1: Setup database

-
    -
  • Requirement: MySQL with InnoDB engine.
  • -
-

Note: In fact, there should be 3 databases for the 3 services in the example use case. However, for simplicity we can just create one database and configure 3 data sources.

-

Modify Spring XML with the database URL/username/password you just created.

-

dubbo-account-service.xml
dubbo-order-service.xml
dubbo-storage-service.xml

-
    <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-    <property name="username" value="xxx" />
-    <property name="password" value="xxx" />
-
-

Step 2: Create UNDO_LOG table for Seata

-

The UNDO_LOG table is required by Seata AT mode.

-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  KEY `idx_unionkey` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8
-
-

Step 3: Create tables for example business

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

Step 4: Start Seata-Server

-
    -
  • Download server package, unzip it.
  • -
  • Start Seata-Server
  • -
-
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
-
-e.g.
-
-sh seata-server.sh 8091 /home/admin/seata/data/
-
-

Step 5: Run example

- -

Related projects

- -
- - - - - - - diff --git a/en-us/blog/quick-start-use-seata-and-dubbo-services.json b/en-us/blog/quick-start-use-seata-and-dubbo-services.json deleted file mode 100644 index 22d4d8b5..00000000 --- a/en-us/blog/quick-start-use-seata-and-dubbo-services.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "quick-start-use-seata-and-dubbo-services.md", - "__html": "

How to use Seata to ensure consistency between Dubbo Microservices

\n

Use case

\n

A business logic for user purchasing commodities. The whole business logic is powered by 3 microservices:

\n
    \n
  • Storage service: deduct storage count on given commodity.
  • \n
  • Order service: create order according to purchase request.
  • \n
  • Account service: debit the balance of user's account.
  • \n
\n

Architecture

\n

\"Architecture\"

\n

StorageService

\n
public interface StorageService {\n\n    /**\n     * deduct storage count\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

OrderService

\n
public interface OrderService {\n\n    /**\n     * create order\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

AccountService

\n
public interface AccountService {\n\n    /**\n     * debit balance of user's account\n     */\n    void debit(String userId, int money);\n}\n
\n

Main business logic

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * purchase\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class StorageServiceImpl implements StorageService {\n\n  private StorageDAO storageDAO;\n  \n    @Override\n    public void deduct(String commodityCode, int count) {\n        Storage storage = new Storage();\n        storage.setCount(count);\n        storage.setCommodityCode(commodityCode);\n        storageDAO.update(storage);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        return orderDAO.insert(order);\n    }\n}\n
\n

Distributed Transaction Solution with Seata

\n

\"undefined\"

\n

We just need an annotation @GlobalTransactional on business method:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

Example powered by Dubbo + Seata

\n

Step 1: Setup database

\n
    \n
  • Requirement: MySQL with InnoDB engine.
  • \n
\n

Note: In fact, there should be 3 database for the 3 services in the example use case. However, we can just create one database and configure 3 data sources for simple.

\n

Modify Spring XML with the database URL/username/password you just created.

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
    <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n    <property name=\"username\" value=\"xxx\" />\n    <property name=\"password\" value=\"xxx\" />\n
\n

Step 2: Create UNDO_LOG table for Seata

\n

UNDO_LOG table is required by Seata AT mode.

\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  KEY `idx_unionkey` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8\n
\n

Step 3: Create tables for example business

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

Step 4: Start Seata-Server

\n
    \n
  • Download server package, unzip it.
  • \n
  • Start Seata-Server
  • \n
\n
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA\n\ne.g.\n\nsh seata-server.sh 8091 /home/admin/seata/data/\n
\n

Step 5: Run example

\n\n

Related projects

\n\n", - "link": "/en-us/blog/quick-start-use-seata-and-dubbo-services.html", - "meta": { - "title": "How to use Seata to ensure consistency between Dubbo Microservices", - "keywords": "Dubbo,Seata,Consistency", - "description": "This article will introduce you how to use Seata to ensure consistency between Dubbo Microservices.", - "author": "slievrly", - "date": "2019-03-07" - } -} \ No newline at end of file diff --git a/en-us/community/index.html b/en-us/community/index.html deleted file mode 100644 index 535123c7..00000000 --- a/en-us/community/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - Community - - - - -
Community

Events & News

title

seata

May 12th, 2018

Talk To Us

Feel free to contact us via the following channel.

Contributor Guide

You can always contribute to Seata.

Mailing List

Join our mailing list.

Issue

Submit a new issue.

Documents

Improve the documentation.

Pull Request

Create a brilliant pull request.

- - - - - - - diff --git a/en-us/docs/dev/mode/at-mode.html b/en-us/docs/dev/mode/at-mode.html deleted file mode 100644 index e5a0eaa5..00000000 --- a/en-us/docs/dev/mode/at-mode.html +++ /dev/null @@ -1,277 +0,0 @@ - - - - - - - - - - Seata AT Mode - - - - -
Documentation

Seata AT mode

-

The basic idea

-

Prerequisite

-
    -
  • Relational databases that support local ACID transaction.
  • -
  • Java applications that access database via JDBC.
  • -
-

Overall mechanism

-

Evolution from the two-phase commit protocol:

-
    -
  • Phase 1: commit business data and the rollback log in the same local transaction, then release the local lock and connection resources.
  • -
  • Phase 2: -
      -
    • for the commit case, do the work asynchronously and quickly.
    • -
    • for the rollback case, do compensation based on the rollback log created in phase 1.
    • -
    -
  • -
-

Write isolation

-
    -
  • The global lock must be acquired before committing the local transaction of phase 1.
  • -
  • If the global lock is not acquired, the local transaction should not be committed.
  • -
  • A transaction retries acquiring the global lock several times if it fails; if it times out, it rolls back the local transaction and releases the local lock as well.
  • -
-

For example:

-

Two transactions tx1 and tx2 are trying to update field m of table a. The original value of m is 1000.

-

tx1 starts first: it begins a local transaction, acquires the local lock and does the update: m = 1000 - 100 = 900. tx1 must acquire the global lock before committing the local transaction; after that, it commits the local transaction and releases the local lock.

-

Next, tx2 begins its local transaction, acquires the local lock and does the update: m = 900 - 100 = 800. Before tx2 can commit its local transaction, it must acquire the global lock, which may still be held by tx1, so tx2 retries. After tx1 does the global commit and releases the global lock, tx2 can acquire the global lock, commit its local transaction and release the local lock.

-

Write-Isolation: Commit

-

As the figure above shows, tx1 does the global commit in phase 2 and releases the global lock; tx2 then acquires the global lock and commits its local transaction.

-

Write-Isolation: Rollback

-

As the figure above shows, if tx1 wants to do the global rollback, it must acquire the local lock to revert the update done in phase 1.

-

However, the local lock is now held by tx2, which is waiting for the global lock, so tx1 fails to roll back and keeps retrying until tx2 times out acquiring the global lock. tx2 then rolls back its local transaction and releases the local lock, after which tx1 can acquire the local lock and complete the branch rollback successfully.

-

Because the global lock is held by tx1 during the whole process, there is no dirty-write problem.

-

Read isolation

-

The isolation level of the local database is read committed or above, so the default isolation level of the global transaction is read uncommitted.

-

If the global transaction needs read committed isolation, Fescar currently implements it via the SELECT FOR UPDATE statement.

-

Read Isolation: SELECT FOR UPDATE

-

The global lock is acquired during the execution of a SELECT FOR UPDATE statement; if the global lock is held by another transaction, the transaction releases the local lock and retries the SELECT FOR UPDATE statement. During this process the query is blocked until the global lock is acquired; once it is acquired, the other global transaction must have committed, so the isolation level of the global transaction is read committed.

-

For performance reasons, Fescar only does proxy work for SELECT FOR UPDATE; general SELECT statements are left untouched.

-
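As a hedged illustration of this read path, the sketch below issues a SELECT FOR UPDATE through the (assumed) Seata-wrapped data source using plain JDBC; it is not Fescar's internal code, and the product table is the one used in the example of the next section.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import javax.sql.DataSource;

public class ProductReader {

    // Assumed to be the proxied business data source; only SELECT ... FOR UPDATE
    // is intercepted by the proxy, which blocks the query until the global lock
    // on the matching rows is free, giving read-committed semantics globally.
    private final DataSource dataSourceProxy;

    public ProductReader(DataSource dataSourceProxy) {
        this.dataSourceProxy = dataSourceProxy;
    }

    public String readCommittedName(long id) throws Exception {
        try (Connection conn = dataSourceProxy.getConnection();
             PreparedStatement ps = conn.prepareStatement(
                     "select id, name, since from product where id = ? for update")) {
            ps.setLong(1, id);
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next() ? rs.getString("name") : null;
            }
        }
    }
}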

Work process

-

Take an example to illustrate it.

-

A business table: product

- - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
-

The SQL of the branch transaction in AT mode:

-
update product set name = 'GTS' where name = 'TXC';
-
-

Phase 1

-

Process:

-
    -
  1. Parse the SQL: determine that the SQL type is UPDATE, that the table name is product, that the WHERE condition is name = 'TXC', and so on.
  2. -
  3. Query the data before the update (the "before image"): to locate the data that will be updated, generate a query statement from the WHERE condition above.
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

Got the "before image":

- - - - - - - - - - - - - - - -
idnamesince
1TXC2014
-
    -
  1. Execute the update SQL: update the matched record, setting name to 'GTS'.
  2. -
  3. Query the data after the update (the "after image"): locate the record by the primary key from the before image.
  4. -
-
select id, name, since from product where id = 1;
-
-

Got the after image:

- - - - - - - - - - - - - - - -
idnamesince
1GTS2014
-
    -
  1. Insert a rollback log: build the rollback log from the before and after images together with the related SQL statement information, then insert it into the UNDO_LOG table.
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. Before the local commit, the transaction submits a request to TC to acquire the global lock for the record whose primary key equals 1 in the product table.
  2. -
  3. Commit the local transaction: the update of the product table and the insert into the UNDO_LOG table are committed in the same local transaction.
  4. -
  5. Report the result of step 7 (the local commit) to TC (a compact sketch of the whole phase-one flow follows this list).
  6. -
-
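The following compact sketch strings the steps above together in one place. The helper types (ImageQuery, UndoLogStore, LockClient) are invented for illustration only and are not Seata's real internal API.

// Illustrative-only sketch of the AT phase-one flow described above.
public class AtPhaseOneSketch {

    interface ImageQuery { Object[] selectByWhere(String where); Object[] selectByPk(Object pk); }
    interface UndoLogStore { void insert(Object[] before, Object[] after, String xid, long branchId); }
    interface LockClient { void acquireGlobalLock(String table, Object pk, String xid); }

    private final ImageQuery imageQuery;
    private final UndoLogStore undoLog;
    private final LockClient lockClient;

    AtPhaseOneSketch(ImageQuery q, UndoLogStore u, LockClient l) {
        this.imageQuery = q; this.undoLog = u; this.lockClient = l;
    }

    void executeBranchUpdate(java.sql.Connection conn, String xid, long branchId) throws Exception {
        // 1. Parse: update product set name = 'GTS' where name = 'TXC'
        Object[] before = imageQuery.selectByWhere("name = 'TXC'");                // 2. before image
        try (java.sql.Statement st = conn.createStatement()) {
            st.executeUpdate("update product set name = 'GTS' where name = 'TXC'"); // 3. business SQL
        }
        Object[] after = imageQuery.selectByPk(before[0]);                          // 4. after image by PK
        undoLog.insert(before, after, xid, branchId);                               // 5. undo log, same local tx
        lockClient.acquireGlobalLock("product", before[0], xid);                    // 6. global lock from TC
        conn.commit();                                                              // 7. local commit; the result is then reported to TC
    }
}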

Phase 2 - Rollback case

-
    -
  1. After receiving the rollback request from TC, begin a local transaction and execute the following operations.
  2. -
  3. Retrieve the UNDO LOG by XID and Branch ID.
  4. -
  5. Validate the data: compare the after image in the UNDO LOG with the current data; if they differ, the data has been changed by an operation outside the current transaction and must be handled by a different policy, which will be described in detail in another document.
  6. -
  7. Generate the rollback SQL statement based on the before image in the UNDO LOG and the related information of the business SQL.
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. Commit the local transaction and report its execution result (the rollback result of the branch transaction) to TC.
  2. -
-

Phase 2 - Commit case

-
    -
  1. After receiving the commit request from TC, put the request into a work queue and return success to TC immediately.
  2. -
  3. While the asynchronous work in the queue is being processed, the UNDO LOGs are deleted in batches.
  4. -
-

Appendix

-

Undo log table

-

UNDO_LOG table: the data types differ slightly between databases.

-

For MySQL example:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldType
branch_idbigint PK
xidvarchar(100)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
extvarchar(100)
-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'increment id',
-  `branch_id` bigint(20) NOT NULL COMMENT 'branch transaction id',
-  `xid` varchar(100) NOT NULL COMMENT 'global transaction id',
-  `context` varchar(128) NOT NULL COMMENT 'undo_log context,such as serialization',
-  `rollback_info` longblob NOT NULL COMMENT 'rollback info',
-  `log_status` int(11) NOT NULL COMMENT '0:normal status,1:defense status',
-  `log_created` datetime NOT NULL COMMENT 'create datetime',
-  `log_modified` datetime NOT NULL COMMENT 'modify datetime',
-  `ext` varchar(100) DEFAULT NULL COMMENT 'reserved field',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='AT transaction mode undo table';
-
-
- - - - - - - diff --git a/en-us/docs/dev/mode/at-mode.json b/en-us/docs/dev/mode/at-mode.json deleted file mode 100644 index b93fae84..00000000 --- a/en-us/docs/dev/mode/at-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "at-mode.md", - "__html": "

Seata AT mode

\n

The basic idea

\n

Prerequisite

\n
    \n
  • Relational databases that support local ACID transaction.
  • \n
  • Java applications that access database via JDBC.
  • \n
\n

Overall mechanism

\n

Evolution from the two phases commit protocol:

\n
    \n
  • Phase 1:commit business data and rollback log in the same local transaction, then release local lock and connection resources.
  • \n
  • Phase 2:\n
      \n
    • for commit case, do the work asynchronously and quickly.
    • \n
    • for rollback case, do compensation, base on the rollback log created in the phase 1.
    • \n
    \n
  • \n
\n

Write isolation

\n
    \n
  • The global lock must be acquired before committing the local transaction of phase 1.
  • \n
  • If the global lock is not acquired, the local transaction should not be committed.
  • \n
  • One transaction will try to acquire the global lock many times if it fails to, but there is a timeout, if it's timeout, rollback local transaction and release local lock as well.
  • \n
\n

For example:

\n

Two transactions tx1 and tx2 are trying to update field m of table a. The original value of m is 1000.

\n

tx1 starts first, begins a local transaction, acquires the local lock, do the update operation: m = 1000 - 100 = 900. tx1 must acquire the global lock before committing the local transaction, after that, commit local transaction and release local lock.

\n

next, tx2 begins local transaction, acquires local lock, do the update operation: m = 900 - 100 = 800. Before tx2 can commit local transaction, it must acquire the global lock, but the global lock may be hold by tx1, so tx2 will do retry. After tx1 does the global commit and releases the global lock, tx2 can acquire the global lock, then it can commit local transaction and release local lock.

\n

\"Write-Isolation:

\n

See the figure above, tx1 does the global commit in phase 2 and release the global lock, tx2 acquires the global lock and commits local transaction.

\n

\"Write-Isolation:

\n

See the figure above, if tx1 wants to do the global rollback, it must acquire local lock to revert the update operation of phase 1.

\n

However, now the local lock is held by tx2 which hopes to acquire the global lock, so tx1 fails to rollback, but it would try it many times until it's timeout for tx2 to acquire the global lock, then tx2 rollbacks local transaction and releases local lock, after that, tx1 can acquire the local lock, and do the branch rollback successfully.

\n

Because the global lock is held by tx1 during the whole process, there isn't no problem of dirty write.

\n

Read isolation

\n

The isolation level of local database is read committed or above, so the default isolation level of the global transaction is read uncommitted.

\n

If it needs the isolation level of the global transaction is read committed, currently, Fescar implements it via SELECT FOR UPDATE statement.

\n

\"Read

\n

The global lock is be applied during the execution of SELECT FOR UPDATE statement, if the global lock is held by other transactions, the transaction will release local lock retry execute the SELECT FOR UPDATE statement. During the whole process, the query is blocked until the global lock is acquired, if the lock is acquired, it means the other global transaction has committed, so the isolation level of global transaction is read committed.

\n

For the performance consideration, Fescar only does proxy work for SELECT FOR UPDATE. For the general SELECT statement, do nothing.

\n

Work process

\n

Take an example to illustrate it.

\n

A business table:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

The sql of branch transaction in AT mode:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

Phase 1

\n

Process:

\n
    \n
  1. Parse sql: know the sql type is update operation, table name is product, the where condition is name = 'TXC' and so on.
  2. \n
  3. Query the data before update(Named before image): In order to locate the data that will be updated, generate a query statement by the where condition above.
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

Got the "before image":

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. Execute the update sql: update the record of name equals 'GTS'.
  2. \n
  3. Query the data after update(Named after image): locate the record by the primary key of image data before update.
  4. \n
\n
select id, name, since from product where id = 1;\n
\n

Got the after image:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. Insert a rollback log: build the rollback log with image before and after, as well as SQL statement relelated information, then insert into table UNDO_LOG .
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. Before local commit, the transaction submmit an application to TC to acquire a global lock for the record whose primary key equals 1 in the table product.
  2. \n
  3. Commit local transaction: commit the update of PRODUCT table and the insert of UNDO_LOG table in the same local transaction.
  4. \n
  5. Report the result of step 7 to TC.
  6. \n
\n

Phase 2 - Rollback case

\n
    \n
  1. After receive the rollback request from TC, begin a local transaction, execute operation as following.
  2. \n
  3. Retrieve the UNDO LOG by XID and Branch ID.
  4. \n
  5. Validate data: Compare the image data after update in UNDO LOG with current data, if there is difference, it means the data has been changed by operation out of current transaction, it should be handled in different policy, we will describe it detailedly in other document.
  6. \n
  7. Generate rollback SQL statement base on before image in UNDO LOG and related information of the business SQL.
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
    \n
  1. Commit local transaction, report the result of execution of local transaction(The rollback result of the Branch transaction) to TC.
  2. \n
\n

Phase 2 - Commit case

\n
    \n
  1. After receive the commit request from TC, put the request into a work queue, return success to TC immediately.
  2. \n
  3. During the phase of doing the asynchronous work in the queue, the UNDO LOGs are deleted in batch way.
  4. \n
\n

Appendix

\n

Undo log table

\n

UNDO_LOG Table:there is a little bit difference on the data type for different databases.

\n

For MySQL example:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldType
branch_idbigint PK
xidvarchar(100)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
extvarchar(100)
\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'increment id',\n  `branch_id` bigint(20) NOT NULL COMMENT 'branch transaction id',\n  `xid` varchar(100) NOT NULL COMMENT 'global transaction id',\n  `context` varchar(128) NOT NULL COMMENT 'undo_log context,such as serialization',\n  `rollback_info` longblob NOT NULL COMMENT 'rollback info',\n  `log_status` int(11) NOT NULL COMMENT '0:normal status,1:defense status',\n  `log_created` datetime NOT NULL COMMENT 'create datetime',\n  `log_modified` datetime NOT NULL COMMENT 'modify datetime',\n  `ext` varchar(100) DEFAULT NULL COMMENT 'reserved field',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COMMENT='AT transaction mode undo table';\n
\n", - "link": "/en-us/docs/dev/mode/at-mode.html", - "meta": { - "title": "Seata AT Mode", - "keywords": "Seata, AT mode", - "description": "Seata AT mode." - } -} \ No newline at end of file diff --git a/en-us/docs/dev/mode/saga-mode.html b/en-us/docs/dev/mode/saga-mode.html deleted file mode 100644 index 84a2c724..00000000 --- a/en-us/docs/dev/mode/saga-mode.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - Seata Saga Mode - - - - -
Documentation

TODO : Should be translated from docs/zh-cn/dev/mode/saga-mode.md

-
- - - - - - - diff --git a/en-us/docs/dev/mode/saga-mode.json b/en-us/docs/dev/mode/saga-mode.json deleted file mode 100644 index abd54792..00000000 --- a/en-us/docs/dev/mode/saga-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "saga-mode.md", - "__html": "

TODO : Should be translated from docs/zh-cn/dev/mode/saga-mode.md

\n", - "link": "/en-us/docs/dev/mode/saga-mode.html", - "meta": { - "title": "Seata Saga Mode", - "keywords": "Seata, Saga mode", - "description": "Seata Saga mode." - } -} \ No newline at end of file diff --git a/en-us/docs/dev/mode/tcc-mode.html b/en-us/docs/dev/mode/tcc-mode.html deleted file mode 100644 index 1ddcf484..00000000 --- a/en-us/docs/dev/mode/tcc-mode.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - Seata TCC Mode - - - - -
Documentation

Seata TCC Mode

-

Recall the description in the overview: a distributed global transaction is, as a whole, a two-phase commit model. The global transaction is composed of several branch transactions, and each branch transaction must meet the requirements of the two-phase commit model, that is, each branch transaction must have its own:

-
    -
  • One-stage prepare behavior
  • -
  • Two-phase commit or rollback behavior
  • -
-

Overview of a global transaction

-

According to the two-phase behavior mode, we divide branch transactions into Automatic (Branch) Transaction Mode and TCC (Branch) Transaction Mode.

-

The AT mode (Reference Link TBD) is based on a relational database that supports local ACID transactions:

-
    -
  • One-stage prepare behavior: In local transactions, business data updates and corresponding rollback log records are submitted together.
  • -
  • Two-phase commit behavior: Immediately completed successfully, automatically asynchronously clean up the rollback log.
  • -
  • Two-phase rollback behavior: Through the rollback log, automatically generates compensation operations to complete data rollback.
  • -
-

Correspondingly, the TCC mode does not rely on transaction support of the underlying data resources:

-
    -
  • One-stage prepare behavior: Call the custom prepare logic.
  • -
  • Two-phase commit behavior: Call custom commit logic.
  • -
  • Two-phase rollback behavior: Call the custom rollback logic.
  • -
-

In short, the TCC mode brings customized branch transactions under the management of the global transaction; a minimal sketch of such a branch follows below.

-
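As a minimal sketch of such a customized branch (assuming Seata's TCC annotations TwoPhaseBusinessAction, BusinessActionContext and BusinessActionContextParameter from io.seata.rm.tcc.api, whose exact package and attribute names may vary by version), the three phases map onto three methods of one interface:

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

public interface TccDebitAction {

    // Phase one (try/prepare): reserve the amount on the user's account.
    @TwoPhaseBusinessAction(name = "tccDebitAction", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext context,
                    @BusinessActionContextParameter(paramName = "userId") String userId,
                    @BusinessActionContextParameter(paramName = "money") int money);

    // Phase two, commit: confirm the debit of the reserved amount.
    boolean commit(BusinessActionContext context);

    // Phase two, rollback: release the reservation.
    boolean rollback(BusinessActionContext context);
}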
- - - - - - - diff --git a/en-us/docs/dev/mode/tcc-mode.json b/en-us/docs/dev/mode/tcc-mode.json deleted file mode 100644 index 626a1c2b..00000000 --- a/en-us/docs/dev/mode/tcc-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "tcc-mode.md", - "__html": "

Seata TCC Mode

\n

Review the description in the overview: A distributed global transaction, the whole is a two-phase commit model. The global transaction is composed of several branch transactions. The branch transaction must meet the requirements of the two-phase commit model, that is, each branch transaction must have its own:

\n
    \n
  • One-stage prepare behavior
  • \n
  • Two-phase commit or rollback behavior
  • \n
\n

\"Overview

\n

According to the two-phase behavior mode, we divide branch transactions into Automatic (Branch) Transaction Mode and TCC (Branch) Transaction Mode.

\n

The AT mode (Reference Link TBD) is based on a relational database that supports local ACID transactions:

\n
    \n
  • One-stage prepare behavior: In local transactions, business data updates and corresponding rollback log records are submitted together.
  • \n
  • Two-phase commit behavior: Immediately completed successfully, automatically asynchronously clean up the rollback log.
  • \n
  • Two-phase rollback behavior: Through the rollback log, automatically generates compensation operations to complete data rollback.
  • \n
\n

Correspondingly, the TCC mode does not rely on transaction support of the underlying data resources:

\n
    \n
  • One-stage prepare behavior: Call the custom prepare logic.
  • \n
  • Two-phase commit behavior: Call custom commit logic.
  • \n
  • Two-phase rollback behavior: Call the custom rollback logic.
  • \n
\n

The so-called TCC mode refers to the support of putting customized's branch transactions into the management of global transactions.

\n", - "link": "/en-us/docs/dev/mode/tcc-mode.html", - "meta": { - "title": "Seata TCC Mode", - "keywords": "Seata, TCC Mode", - "description": "Seata TCC mode." - } -} \ No newline at end of file diff --git a/en-us/docs/dev/seata-mertics.html b/en-us/docs/dev/seata-mertics.html deleted file mode 100644 index e8357c40..00000000 --- a/en-us/docs/dev/seata-mertics.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - Seata Metrics - - - - -
Documentation

TODO : Should be translated from docs/zh-cn/dev/architecture/seata_mertics.md

-
- - - - - - - diff --git a/en-us/docs/dev/seata-mertics.json b/en-us/docs/dev/seata-mertics.json deleted file mode 100644 index cfe58af4..00000000 --- a/en-us/docs/dev/seata-mertics.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "seata-mertics.md", - "__html": "

TODO : Should be translated from docs/zh-cn/dev/architecture/seata_mertics.md

\n", - "link": "/en-us/docs/dev/seata-mertics.html", - "meta": { - "title": "Seata Metrics", - "keywords": "Seata, Metrics", - "description": "Seata Metrics." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html b/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html deleted file mode 100644 index 7a6b98e8..00000000 --- a/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html +++ /dev/null @@ -1,73 +0,0 @@ - - - - - - - - - - Label an Issue - - - - -
Documentation

Label an Issue

-

If you are handling an issue, remember to mark the issue clearly with one or more labels whenever you think it's meaningful. With labels on, other developers can easily recognize problems, classify them or track progress.

-

For issues or pull requests that need coding and further version release to fix, you should always mark it with a milestone.

-

Some frequently used labels:

-
    -
  • -

    Help Wanted

    -
      -
    • help wanted
    • -
    • good first issue
    • -
    -
  • -
  • -

    Priority

    -
      -
    • priority/blocker
    • -
    • priority/high
    • -
    • priority/low
    • -
    • priority/normal
    • -
    -
  • -
  • -

    Status

    -
      -
    • status/need-triage
    • -
    • status/DO-NOT-MERGE
    • -
    • status/READY-TO-MERGE
    • -
    • status/invalid
    • -
    • status/wontfix
    • -
    -
  • -
  • -

    Type

    -
      -
    • type/bug
    • -
    • type/documentation
    • -
    • type/enhancement
    • -
    • type/feature
    • -
    -
  • -
-
- - - - - - - diff --git a/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.json b/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.json deleted file mode 100644 index bdf160f1..00000000 --- a/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "label-an-issue-guide_dev.md", - "__html": "

Label an Issue

\n

If you are handling an issue, remember to mark the issue cearly with one or more labels whenever you think it's meaningful. With labels on, other developers can easily recognize problems, classify them or track progress.

\n

For issues or pull requests that need coding and further version release to fix, you should always mark it with a milestone.

\n

Some frequently used labels:

\n
    \n
  • \n

    Help Wanted

    \n
      \n
    • help wanted
    • \n
    • good first issue
    • \n
    \n
  • \n
  • \n

    Prority

    \n
      \n
    • priority/blocker
    • \n
    • priority/high
    • \n
    • priority/low
    • \n
    • priority/normal
    • \n
    \n
  • \n
  • \n

    Status

    \n
      \n
    • status/need-triage
    • \n
    • status/DO-NOT-MERGE
    • \n
    • status/READY-TO-MERGE
    • \n
    • status/invalid
    • \n
    • status/wontfix
    • \n
    \n
  • \n
  • \n

    Type

    \n
      \n
    • type/bug
    • \n
    • type/documentation
    • \n
    • type/enhancement
    • \n
    • type/feature
    • \n
    \n
  • \n
\n", - "link": "/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html", - "meta": { - "title": "Label an Issue", - "keywords": "Seata", - "description": "Label an Issue." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/committer-guide/release-guide_dev.html b/en-us/docs/developers/committer-guide/release-guide_dev.html deleted file mode 100644 index 0fb38ca7..00000000 --- a/en-us/docs/developers/committer-guide/release-guide_dev.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - release-guide_dev - - - - -
Documentation

TBD

-
- - - - - - - diff --git a/en-us/docs/developers/committer-guide/release-guide_dev.json b/en-us/docs/developers/committer-guide/release-guide_dev.json deleted file mode 100644 index c6767d70..00000000 --- a/en-us/docs/developers/committer-guide/release-guide_dev.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "release-guide_dev.md", - "__html": "

TBD

\n", - "link": "/en-us/docs/developers/committer-guide/release-guide_dev.html", - "meta": {} -} \ No newline at end of file diff --git a/en-us/docs/developers/committer-guide/website-guide_dev.html b/en-us/docs/developers/committer-guide/website-guide_dev.html deleted file mode 100644 index a268e352..00000000 --- a/en-us/docs/developers/committer-guide/website-guide_dev.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - Website Guide - - - - -
Documentation

Website Guide

-
    -
  1. The website repository of Seata is https://github.com/seata/seata.github.io.
  2. -
  3. After building the website, it'll be published to seata.io automatically.
  4. -
-
- - - - - - - diff --git a/en-us/docs/developers/committer-guide/website-guide_dev.json b/en-us/docs/developers/committer-guide/website-guide_dev.json deleted file mode 100644 index 79f487e2..00000000 --- a/en-us/docs/developers/committer-guide/website-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "website-guide_dev.md", - "__html": "

Website Guide

\n
    \n
  1. The website repository of Seata is https://github.com/seata/seata.github.io.
  2. \n
  3. After building the website, it'll be published to seata.io automatically.
  4. \n
\n", - "link": "/en-us/docs/developers/committer-guide/website-guide_dev.html", - "meta": { - "title": "Website Guide", - "keywords": "Seata", - "description": "Website Guide." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html b/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html deleted file mode 100644 index f61349aa..00000000 --- a/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - New contributor guide - - - - -
Documentation

New contributor guide

-

This is a guide for newcomers who want to contribute to Seata.

-

Subscribe to the mailing list

-

TBD

-

Reporting issue

-

You can always report an issue to Seata via GitHub Issues.

-

If you are reporting bugs, please refer to the issue report template.

-

If you are requesting a feature, please refer to the issue report template.

-

If you are reporting a regular issue, such as asking a question, you can open a regular issue.

-

Sending pull request

-
    -
  • Follow the checklist in the pull request template
  • -
  • Before you send out the pull request, please sync your forked repository with the remote repository; this will keep your pull request simple and clear. See the guide below:
  • -
-
git remote add upstream git@github.com:seata/seata.git
-git fetch upstream
-git rebase upstream/master
-git checkout -b your_awesome_patch
-... add some work
-git push origin your_awesome_patch
-
-

Code convention

-

Please check the CONTRIBUTING.md for code convention.

-
- - - - - - - diff --git a/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.json b/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.json deleted file mode 100644 index 526b67a0..00000000 --- a/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "new-contributor-guide_dev.md", - "__html": "

New contributor guide

\n

This is a guide for new comers who wants to contribute to Seata.

\n

Subscribe to the mailing list

\n

TBD

\n

Reporting issue

\n

You can always reporting an issue to Seata via Github Issues.

\n

If you are reporting bugs, please refer to the issue report template.

\n

If you are reporting feature, please refer to the issue report template.

\n

If you are reporting regular issues, like raise an question, you can open an regular issue.

\n

Sending pull request

\n
    \n
  • Follow the checklist in the pull request template
  • \n
  • Before you sending out the pull request, please sync your forked repository with remote repository, this will make your pull request simple and clear. See guide below:
  • \n
\n
git remote add upstream git@github.com:seata/seata.git\ngit fetch upstream\ngit rebase upstream/master\ngit checkout -b your_awesome_patch\n... add some work\ngit push origin your_awesome_patch\n
\n

Code convention

\n

Please check the CONTRIBUTING.md for code convention.

\n", - "link": "/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html", - "meta": { - "title": "New contributor guide", - "keywords": "Seata, contributor", - "description": "This is a guide for new comers who wants to contribute to Seata." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html b/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html deleted file mode 100644 index 20082167..00000000 --- a/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - New contributor guide - - - - -
Documentation

Reporting Security Issues

-

The Seata Group takes a rigorous standpoint in annihilating the security issues in its software projects. Seata is highly sensitive and forthcoming to issues pertaining to its features and functionality.

-

REPORTING VULNERABILITY

-

If you have apprehensions regarding Seata's security, or you discover a vulnerability or potential threat, don’t hesitate to get in touch with the Seata Security Team by dropping a mail at dev-seata@googlegroups.com. In the mail, describe the issue or potential threat. You are also urged to recommend a way to reproduce and replicate the issue. The Seata community will get back to you after assessing and analysing the findings.

-

PLEASE make sure to report the security issue via the security email above before disclosing it in the public domain.

-

VULNERABILITY HANDLING

-

An overview of the vulnerability handling process is:

-
    -
  • The reporter reports the vulnerability privately to Apache.
  • -
  • The appropriate project's security team works privately with the reporter to resolve the vulnerability.
  • -
  • A new release of the Apache product concerned is made that includes the fix.
  • -
  • The vulnerability is publicly announced.
  • -
-
- - - - - - - diff --git a/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.json b/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.json deleted file mode 100644 index ab1072bd..00000000 --- a/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "reporting-security-issues_dev.md", - "__html": "

Reporting Security Issues

\n

The Seata Group takes a rigorous standpoint in annihilating the security issues in its software projects. Seata is highly sensitive and forthcoming to issues pertaining to its features and functionality.

\n

REPORTING VULNERABILITY

\n

If you have apprehensions regarding Seata's security or you discover vulnerability or potential threat, don’t hesitate to get in touch with the Seata Security Team by dropping a mail at dev-seata@googlegroups.com. In the mail, specify the description of the issue or potential threat. You are also urged to recommend the way to reproduce and replicate the issue. The Seata community will get back to you after assessing and analysing the findings.

\n

PLEASE PAY ATTENTION to report the security issue on the security email before disclosing it on public domain.

\n

VULNERABILITY HANDLING

\n

An overview of the vulnerability handling process is:

\n
    \n
  • The reporter reports the vulnerability privately to Apache.
  • \n
  • The appropriate project's security team works privately with the reporter to resolve the vulnerability.
  • \n
  • A new release of the Apache product concerned is made that includes the fix.
  • \n
  • The vulnerability is publicly announced.
  • \n
\n", - "link": "/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html", - "meta": { - "title": "New contributor guide", - "keywords": "Seata, contributor", - "description": "This is a guide for new comers who wants to contribute to Seata." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html b/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html deleted file mode 100644 index b91ecd0c..00000000 --- a/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - Test coverage guide - - - - -
Documentation

Test coverage guide

-

1.The benefits of unit testing

-
    -
  • Unit test code can help everyone to go into details and understand the function of the code.
  • -
  • We can find bugs by test case, and then enhance the robustness of the code.
  • -
  • Test case code is also the demo usage of the core code.
  • -
-

2.Some design principle of unit test case

-
    -
  • Steps, fine-grained and combination conditions should be well designed.
  • -
  • Pay attention to boundary condition tests.
  • -
  • Test code should also be designed without writing useless code.
  • -
  • When you find a method that is hard to write a unit test for, and you can be sure the method is "smelly code", refactor it with the committer.
  • -
  • The mock framework used in Seata is Mockito. Some tutorials: mockito tutorial, mockito refcard (see the sketch after this list).
  • -
  • TDD (optional): when you start a new issue, you can try to write the test case first.
  • -
-
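As a concrete illustration of these principles, here is a minimal sketch of a unit test using JUnit 5 and Mockito. The GreetingService and NameDao types are purely illustrative, not actual Seata code; only the JUnit and Mockito APIs are real.

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.jupiter.api.Test;

class GreetingServiceTest {

    // Illustrative collaborator to be mocked; not a real Seata interface.
    interface NameDao {
        String findName(long id);
    }

    // Illustrative class under test.
    static class GreetingService {
        private final NameDao dao;
        GreetingService(NameDao dao) { this.dao = dao; }
        String greet(long id) {
            String name = dao.findName(id);
            return name == null ? "hello, guest" : "hello, " + name;
        }
    }

    @Test
    void greetKnownUser() {
        // Mock only the direct dependency so the test covers GreetingService logic alone.
        NameDao dao = mock(NameDao.class);
        when(dao.findName(1L)).thenReturn("seata");
        assertEquals("hello, seata", new GreetingService(dao).greet(1L));
    }

    @Test
    void greetUnknownUserCoversBoundaryCondition() {
        // Boundary case: the dependency returns no name.
        NameDao dao = mock(NameDao.class);
        when(dao.findName(2L)).thenReturn(null);
        assertEquals("hello, guest", new GreetingService(dao).greet(2L));
    }
}

Each test mocks only the direct dependency and also covers a boundary case (a missing name), in line with the principles above.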

3.The specified value of the test coverage

-
    -
  • At this stage, the required test coverage for changed (delta) code is >= 80%. The higher, the better.
  • -
  • We can see the coverage report in this page: https://codecov.io/gh/seata/seata
  • -
-
- - - - - - - diff --git a/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.json b/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.json deleted file mode 100644 index c20d3374..00000000 --- a/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "test-coverage-guide_dev.md", - "__html": "

Test coverage guide

\n

1.The benefits of unit testing

\n
    \n
  • Unit test code can help everyone to go into details and understand the function of the code.
  • \n
  • We can find bugs by test case, and then enhance the robustness of the code.
  • \n
  • Test case code is also the demo usage of the core code.
  • \n
\n

2.Some design principle of unit test case

\n
    \n
  • Steps, fine-grained and combination conditions should be well designed.
  • \n
  • Pay attention to boundary condition tests.
  • \n
  • Test code should also be designed without writing useless code.
  • \n
  • When you find a method that is hard to write a unit test for, and you can be sure the method is "smelly code", refactor it with the committer.
  • \n
  • The mock framework used in Seata is Mockito. Some tutorials: mockito tutorial, mockito refcard
  • \n
  • TDD (optional): when you start a new issue, you can try to write the test case first.
  • \n
\n

3.The specified value of the test coverage

\n
    \n
  • At this stage, the required test coverage for changed (delta) code is >= 80%. The higher, the better.
  • \n
  • We can see the coverage report in this page: https://codecov.io/gh/seata/seata
  • \n
\n", - "link": "/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html", - "meta": { - "title": "Test coverage guide", - "keywords": "Seata, coverage", - "description": "Test coverage guide." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/developers_dev.html b/en-us/docs/developers/developers_dev.html deleted file mode 100644 index a1988a09..00000000 --- a/en-us/docs/developers/developers_dev.html +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - Developers - - - - -
Documentation

Seata Team

-

This page shows Seata developers and continues to expand. The list is not prioritized.

-

Seata Committer List

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Name | GitHub | Company
Min Ji | slievrly | Alibaba
Haiqiang Shen | sharajava | Alibaba
Qipeng Li | wlliqipeng | Alibaba
Liandong Chen | purple-force | Alibaba
Jiang Yu | jiangyu-gts | Alibaba
Wei Zhang | jezhang2014 | Alibaba
Sen Zhang | zhangthen | Antfin
Guangshu Wang | wgs13579 | Antfin
Geng Zhang | ujjboy | Antfin
Zhiyuan Lei | leizhiyuan | Antfin
Qing Wang | jovany-wang | Antfin
Jiangke Wu | xingfudeshi | Truthai
Zhao Li | CoffeeLatte007 | YuanFuDao
Xin Wang | lovepoem | WeiDian
Guoyao Yuan | github-ygy | Tuya
Xu Zhang | zhangxu19830126 | InfiniVision
Shuaipeng Ren | niaoshuai | HuanQiuYouLu
Donglin Zhai | tony-zdl | Springs Capital
Xugang Shen | xuririse | CNIC
Deyou Xu | skyesx | WeBank
Jinlei Zhuang | zjinlei | Helios
Jiawei Zhang | l81893521 | Locals
Zhengtao Zhong | jsbxyyx | Shenzhen arts
Long Chen | long187 | Antfin
-

Seata Developer Roles

-

Seata developers include three roles: Maintainer, Committer, and Contributor. The standard definitions for each role are as follows.

-

Maintainer

-

Maintainer is an individual who has made a significant contribution to the evolution and development of the Seata project, including projects under the seata group. Specifically includes the following criteria:

-
    -
  • Completed the design and development of multiple key modules or projects, and is a core developer of the project.
  • -
  • Continuous investment and passion, can actively participate in the maintenance of related matters such as community, official website, issue, PR, etc.
  • -
  • Has a visible influence in the community and is able to represent Seata in important community meetings and events.
  • -
  • Have the consciousness and ability to cultivate Committer and Contributor.
  • -
-

Committer

-

Committer is an individual with write access to the Seata repository and meets the following criteria:

-
    -
  • An individual who can contribute to the issue and PR continuously for a long time.
  • -
  • Participate in the maintenance of the issue list and discussion of important features.
  • -
  • Participate in code review.
  • -
-

Contributor

-

Contributor is an individual who contributes to the Seata project. The standard is:

-
    -
  • Submitted a PR that is merged.
  • -
-
- - - - - - - diff --git a/en-us/docs/developers/developers_dev.json b/en-us/docs/developers/developers_dev.json deleted file mode 100644 index bb7198f5..00000000 --- a/en-us/docs/developers/developers_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "developers_dev.md", - "__html": "

Seata Team

\n

This page shows Seata developers and continues to expand. The list is not prioritized.

\n

Seata Committer List

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Name | GitHub | Company
Min Ji | slievrly | Alibaba
Haiqiang Shen | sharajava | Alibaba
Qipeng Li | wlliqipeng | Alibaba
Liandong Chen | purple-force | Alibaba
Jiang Yu | jiangyu-gts | Alibaba
Wei Zhang | jezhang2014 | Alibaba
Sen Zhang | zhangthen | Antfin
Guangshu Wang | wgs13579 | Antfin
Geng Zhang | ujjboy | Antfin
Zhiyuan Lei | leizhiyuan | Antfin
Qing Wang | jovany-wang | Antfin
Jiangke Wu | xingfudeshi | Truthai
Zhao Li | CoffeeLatte007 | YuanFuDao
Xin Wang | lovepoem | WeiDian
Guoyao Yuan | github-ygy | Tuya
Xu Zhang | zhangxu19830126 | InfiniVision
Shuaipeng Ren | niaoshuai | HuanQiuYouLu
Donglin Zhai | tony-zdl | Springs Capital
Xugang Shen | xuririse | CNIC
Deyou Xu | skyesx | WeBank
Jinlei Zhuang | zjinlei | Helios
Jiawei Zhang | l81893521 | Locals
Zhengtao Zhong | jsbxyyx | Shenzhen arts
Long Chen | long187 | Antfin
\n

Seata Developer Roles

\n

Seata developers include three roles: Maintainer, Committer, and Contributor. The standard definitions for each role are as follows.

\n

Maintainer

\n

Maintainer is an individual who has made a significant contribution to the evolution and development of the Seata project, including projects under the seata group. Specifically includes the following criteria:

\n
    \n
  • Completed the design and development of multiple key modules or projects, and is a core developer of the project.
  • \n
  • Continuous investment and passion, can actively participate in the maintenance of related matters such as community, official website, issue, PR, etc.
  • \n
  • Has a visible influence in the community and is able to represent Seata in important community meetings and events.
  • \n
  • Have the consciousness and ability to cultivate Committer and Contributor.
  • \n
\n

Committer

\n

Committer is an individual with write access to the Seata repository and meets the following criteria:

\n
    \n
  • An individual who can contribute to the issue and PR continuously for a long time.
  • \n
  • Participate in the maintenance of the issue list and discussion of important features.
  • \n
  • Participate in code review.
  • \n
\n

Contributor

\n

Contributor is an individual who contributes to the Seata project. The standard is:

\n
    \n
  • Submitted a PR that is merged.
  • \n
\n", - "link": "/en-us/docs/developers/developers_dev.html", - "meta": { - "title": "Developers", - "keywords": "Seata, Developers", - "description": "Seata Team." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/guide_dev.html b/en-us/docs/developers/guide_dev.html deleted file mode 100644 index 586d4672..00000000 --- a/en-us/docs/developers/guide_dev.html +++ /dev/null @@ -1,185 +0,0 @@ - - - - - - - - - - Contributing to Seata - - - - -
Documentation

Contributing to Seata

-

You are warmly welcomed if you have an interest in hacking on Seata. First, we encourage this kind of willingness very much. Here is a contributing guide for you.

-

Topics

- -

Reporting security issues

-

Security issues are always treated seriously. As our usual principle, we discourage anyone from spreading security issues. If you find a security issue in Seata, please do not discuss it in public and do not open a public issue. Instead, we encourage you to report it by sending a private email to dev-seata@googlegroups.com.

-

Reporting general issues

-

To be honest, we regard every user of Seata as a very kind contributor. After experiencing Seata, you may have some feedback for the project. Then feel free to open an issue via NEW ISSUE.

-

Since we collaborate on the Seata project in a distributed way, we appreciate WELL-WRITTEN, DETAILED, EXPLICIT issue reports. To make communication more efficient, we wish everyone would first search whether the issue already exists in the issue list. If it already exists, please add your details as comments under the existing issue instead of opening a brand new one.

-

To make the issue details as standard as possible, we set up an ISSUE TEMPLATE for issue reporters. Please BE SURE to follow the instructions and fill in the fields in the template.

-

There are a lot of cases when you could open an issue:

-
    -
  • bug report
  • -
  • feature request
  • -
  • performance issues
  • -
  • feature proposal
  • -
  • feature design
  • -
  • help wanted
  • -
  • doc incomplete
  • -
  • test improvement
  • -
  • any questions on project
  • -
  • and so on
  • -
-

Also, we must remind you that when filing a new issue, please remember to remove any sensitive data from your post. Sensitive data could be passwords, secret keys, network locations, private business data and so on.

-

Code and doc contribution

-

Every action to make project Seata better is encouraged. On GitHub, every improvement for Seata could be via a PR (short for pull request).

-
    -
  • If you find a typo, try to fix it!
  • -
  • If you find a bug, try to fix it!
  • -
  • If you find some redundant codes, try to remove them!
  • -
  • If you find some test cases missing, try to add them!
  • -
  • If you could enhance a feature, please DO NOT hesitate!
  • -
  • If you find code implicit, try to add comments to make it clear!
  • -
  • If you find code ugly, try to refactor that!
  • -
  • If you can help to improve documents, it could not be better!
  • -
  • If you find document incorrect, just do it and fix that!
  • -
  • ...
  • -
-

Actually it is impossible to list them completely. Just remember one principle:

-
-

WE ARE LOOKING FORWARD TO ANY PR FROM YOU.

-
-

Since you are ready to improve Seata with a PR, we suggest you could take a look at the PR rules here.

- -

Workspace Preparation

-

To put forward a PR, we assume you have registered a GitHub ID. Then you could finish the preparation in the following steps:

-
    -
  1. -

    FORK Seata to your repository. To make this work, you just need to click the Fork button at the top right of the seata/seata main page. Then you will end up with your repository at https://github.com/<your-username>/seata, in which your-username is your GitHub username.

    -
  2. -
  3. -

    CLONE your own repository to develop locally. Use git clone git@github.com:<your-username>/seata.git to clone repository to your local machine. Then you can create new branches to finish the change you wish to make.

    -
  4. -
  5. -

    Set Remote upstream to be git@github.com:seata/seata.git using the following two commands:

    -
  6. -
-
git remote add upstream git@github.com:seata/seata.git
-git remote set-url --push upstream no-pushing
-
-

With this remote setting, you can check your git remote configuration like this:

-
$ git remote -v
-origin     git@github.com:<your-username>/seata.git (fetch)
-origin     git@github.com:<your-username>/seata.git (push)
-upstream   git@github.com:seata/seata.git (fetch)
-upstream   no-pushing (push)
-
-

Adding this, we can easily synchronize local branches with upstream branches.

-

Branch Definition

-

Right now we assume every contribution via pull request is for the develop branch in Seata. Before contributing, being aware of the branch definitions would help a lot.

-

As a contributor, keep in mind again that every contribution via pull request is for the develop branch. In the Seata project there are several other branches; we generally call them release branches (such as 0.6.0, 0.6.1), feature branches, hotfix branches and the master branch.

-

When officially releasing a version, there will be a release branch named with the version number.

-

After the release, we will merge the commit of the release branch into the master branch.

-

When we find that there is a bug in a certain version, we will decide to fix it in a later version or fix it in a specific hotfix version. When we decide to fix the hotfix version, we will checkout the hotfix branch based on the corresponding release branch, perform code repair and verification, and merge it into the develop branch and the master branch.

-

For larger features, we will pull out the feature branch for development and verification.

-

Commit Rules

-

Actually in Seata, we take two rules seriously when committing:

- -

Commit Message

-

A commit message helps reviewers better understand the purpose of the submitted PR, and it accelerates the code review procedure as well. We encourage contributors to use EXPLICIT commit messages rather than ambiguous ones. In general, we advocate the following commit message types:

-
    -
  • docs: xxxx. For example, "docs: add docs about Seata cluster installation".
  • -
  • feature: xxxx.For example, "feature: support oracle in AT mode".
  • -
  • bugfix: xxxx. For example, "bugfix: fix panic when input nil parameter".
  • -
  • refactor: xxxx. For example, "refactor: simplify to make codes more readable".
  • -
  • test: xxx. For example, "test: add unit test case for func InsertIntoArray".
  • -
  • other readable and explicit expression ways.
  • -
-

On the other hand, we discourage contributors from committing messages like the following:

-
    -
  • fix bug
  • -
  • update
  • -
  • add doc
  • -
-

If you get lost, please see How to Write a Git Commit Message for a start.

-

Commit Content

-

Commit content represents all content changes included in one commit. We should include in a single commit everything the reviewer needs for a complete review, without help from any other commits. In other words, the contents of a single commit should pass the CI to avoid a code mess. In brief, there are three minor rules for us to keep in mind:

-
    -
  • avoid very large change in a commit;
  • -
  • complete and reviewable for each commit.
  • -
  • check git config(user.name, user.email) when committing to ensure that it is associated with your github ID.
  • -
-

In addition, in the code change part, we suggest that all contributors should read the code style of Seata.

-

No matter commit message, or commit content, we do take more emphasis on code review.

-

PR Description

-

A PR is the only way to make changes to Seata project files. To help reviewers better understand your purpose, your PR description can never be too detailed. We encourage contributors to follow the PR template to finish the pull request.

-

Test case contribution

-

Any test case would be welcomed. Currently, Seata function test cases are high priority.

-
    -
  • -

    For unit tests, you need to create a test file named xxxTest.java in the test directory of the same module. We recommend using the JUnit 5 unit test framework (see the sketch after this list).

    -
  • -
  • -

    For integration tests, you can put the test in the test directory or the seata-test module. It is recommended to use the Mockito test framework.

    -
  • -
-
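For illustration, a minimal sketch of such a unit test file, assuming a hypothetical class Foo in the same module (the names are illustrative only, not actual Seata code); the file would live as FooTest.java under the module's test directory:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class FooTest {

    // Illustrative class under test; in a real PR this would live under src/main/java of the module.
    static class Foo {
        int add(int a, int b) {
            return a + b;
        }
    }

    @Test
    void addShouldSumTwoNumbers() {
        // Plain JUnit 5 test; no mocking is needed for this simple case.
        assertEquals(3, new Foo().add(1, 2));
    }
}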

Engage to help anything

-

We choose GitHub as the primary place for Seata collaboration, so the latest updates of Seata are always here. Although contributing via PRs is an explicit way to help, we still call for other ways of helping:

-
    -
  • reply to other's issues if you could;
  • -
  • help solve other user's problems;
  • -
  • help review other's PR design;
  • -
  • help review other's codes in PR;
  • -
  • discuss about Seata to make things clearer;
  • -
  • advocate Seata technology beyond GitHub;
  • -
  • write blogs on Seata and so on.
  • -
-

Code Style

-

The Seata code style complies with the Alibaba Java Coding Guidelines.

-

Guidelines

-

Alibaba-Java-Coding-Guidelines

-

IDE Plugin Install(not necessary)

-

Installing the plugin is not necessary, but it can help you find problems while you are coding.

-

idea IDE

-

p3c-idea-plugin-install

-

eclipse IDE

-

p3c-eclipse-plugin-install

-

In a word, ANY HELP IS CONTRIBUTION.

-
- - - - - - - diff --git a/en-us/docs/developers/guide_dev.json b/en-us/docs/developers/guide_dev.json deleted file mode 100644 index be76bf44..00000000 --- a/en-us/docs/developers/guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "guide_dev.md", - "__html": "

Contributing to Seata

\n

You are warmly welcomed if you have an interest in hacking on Seata. First, we encourage this kind of willingness very much. Here is a contributing guide for you.

\n

Topics

\n\n

Reporting security issues

\n

Security issues are always treated seriously. As our usual principle, we discourage anyone from spreading security issues. If you find a security issue in Seata, please do not discuss it in public and do not open a public issue. Instead, we encourage you to report it by sending a private email to dev-seata@googlegroups.com.

\n

Reporting general issues

\n

To be honest, we regard every user of Seata as a very kind contributor. After experiencing Seata, you may have some feedback for the project. Then feel free to open an issue via NEW ISSUE.

\n

Since we collaborate on the Seata project in a distributed way, we appreciate WELL-WRITTEN, DETAILED, EXPLICIT issue reports. To make communication more efficient, we wish everyone would first search whether the issue already exists in the issue list. If it already exists, please add your details as comments under the existing issue instead of opening a brand new one.

\n

To make the issue details as standard as possible, we set up an ISSUE TEMPLATE for issue reporters. Please BE SURE to follow the instructions and fill in the fields in the template.

\n

There are a lot of cases when you could open an issue:

\n
    \n
  • bug report
  • \n
  • feature request
  • \n
  • performance issues
  • \n
  • feature proposal
  • \n
  • feature design
  • \n
  • help wanted
  • \n
  • doc incomplete
  • \n
  • test improvement
  • \n
  • any questions on project
  • \n
  • and so on
  • \n
\n

Also, we must remind you that when filing a new issue, please remember to remove any sensitive data from your post. Sensitive data could be passwords, secret keys, network locations, private business data and so on.

\n

Code and doc contribution

\n

Every action to make project Seata better is encouraged. On GitHub, every improvement for Seata could be via a PR (short for pull request).

\n
    \n
  • If you find a typo, try to fix it!
  • \n
  • If you find a bug, try to fix it!
  • \n
  • If you find some redundant codes, try to remove them!
  • \n
  • If you find some test cases missing, try to add them!
  • \n
  • If you could enhance a feature, please DO NOT hesitate!
  • \n
  • If you find code implicit, try to add comments to make it clear!
  • \n
  • If you find code ugly, try to refactor that!
  • \n
  • If you can help to improve documents, it could not be better!
  • \n
  • If you find document incorrect, just do it and fix that!
  • \n
  • ...
  • \n
\n

Actually it is impossible to list them completely. Just remember one principle:

\n
\n

WE ARE LOOKING FORWARD TO ANY PR FROM YOU.

\n
\n

Since you are ready to improve Seata with a PR, we suggest you could take a look at the PR rules here.

\n\n

Workspace Preparation

\n

To put forward a PR, we assume you have registered a GitHub ID. Then you could finish the preparation in the following steps:

\n
    \n
  1. \n

    FORK Seata to your repository. To make this work, you just need to click the Fork button at the top right of the seata/seata main page. Then you will end up with your repository at https://github.com/<your-username>/seata, in which your-username is your GitHub username.

    \n
  2. \n
  3. \n

    CLONE your own repository to develop locally. Use git clone git@github.com:<your-username>/seata.git to clone repository to your local machine. Then you can create new branches to finish the change you wish to make.

    \n
  4. \n
  5. \n

    Set Remote upstream to be git@github.com:seata/seata.git using the following two commands:

    \n
  6. \n
\n
git remote add upstream git@github.com:seata/seata.git\ngit remote set-url --push upstream no-pushing\n
\n

With this remote setting, you can check your git remote configuration like this:

\n
$ git remote -v\norigin     git@github.com:<your-username>/seata.git (fetch)\norigin     git@github.com:<your-username>/seata.git (push)\nupstream   git@github.com:seata/seata.git (fetch)\nupstream   no-pushing (push)\n
\n

Adding this, we can easily synchronize local branches with upstream branches.

\n

Branch Definition

\n

Right now we assume every contribution via pull request is for the develop branch in Seata. Before contributing, being aware of the branch definitions would help a lot.

\n

As a contributor, keep in mind again that every contribution via pull request is for the develop branch. In the Seata project there are several other branches; we generally call them release branches (such as 0.6.0, 0.6.1), feature branches, hotfix branches and the master branch.

\n

When officially releasing a version, there will be a release branch named with the version number.

\n

After the release, we will merge the commit of the release branch into the master branch.

\n

When we find that there is a bug in a certain version, we will decide to fix it in a later version or fix it in a specific hotfix version. When we decide to fix the hotfix version, we will checkout the hotfix branch based on the corresponding release branch, perform code repair and verification, and merge it into the develop branch and the master branch.

\n

For larger features, we will pull out the feature branch for development and verification.

\n

Commit Rules

\n

Actually in Seata, we take two rules seriously when committing:

\n\n

Commit Message

\n

A commit message helps reviewers better understand the purpose of the submitted PR, and it accelerates the code review procedure as well. We encourage contributors to use EXPLICIT commit messages rather than ambiguous ones. In general, we advocate the following commit message types:

\n
    \n
  • docs: xxxx. For example, "docs: add docs about Seata cluster installation".
  • \n
  • feature: xxxx.For example, "feature: support oracle in AT mode".
  • \n
  • bugfix: xxxx. For example, "bugfix: fix panic when input nil parameter".
  • \n
  • refactor: xxxx. For example, "refactor: simplify to make codes more readable".
  • \n
  • test: xxx. For example, "test: add unit test case for func InsertIntoArray".
  • \n
  • other readable and explicit expression ways.
  • \n
\n

On the other hand, we discourage contributors from committing messages like the following:

\n
    \n
  • fix bug
  • \n
  • update
  • \n
  • add doc
  • \n
\n

If you get lost, please see How to Write a Git Commit Message for a start.

\n

Commit Content

\n

Commit content represents all content changes included in one commit. We should include in a single commit everything the reviewer needs for a complete review, without help from any other commits. In other words, the contents of a single commit should pass the CI to avoid a code mess. In brief, there are three minor rules for us to keep in mind:

\n
    \n
  • avoid very large change in a commit;
  • \n
  • complete and reviewable for each commit.
  • \n
  • check git config(user.name, user.email) when committing to ensure that it is associated with your github ID.
  • \n
\n

In addition, in the code change part, we suggest that all contributors should read the code style of Seata.

\n

No matter commit message, or commit content, we do take more emphasis on code review.

\n

PR Description

\n

A PR is the only way to make changes to Seata project files. To help reviewers better understand your purpose, your PR description can never be too detailed. We encourage contributors to follow the PR template to finish the pull request.

\n

Test case contribution

\n

Any test case would be welcomed. Currently, Seata function test cases are high priority.

\n
    \n
  • \n

    For unit tests, you need to create a test file named xxxTest.java in the test directory of the same module. We recommend using the JUnit 5 unit test framework.

    \n
  • \n
  • \n

    For integration tests, you can put the test in the test directory or the seata-test module. It is recommended to use the Mockito test framework.

    \n
  • \n
\n

Engage to help anything

\n

We choose GitHub as the primary place for Seata collaboration, so the latest updates of Seata are always here. Although contributing via PRs is an explicit way to help, we still call for other ways of helping:

\n
    \n
  • reply to other's issues if you could;
  • \n
  • help solve other user's problems;
  • \n
  • help review other's PR design;
  • \n
  • help review other's codes in PR;
  • \n
  • discuss about Seata to make things clearer;
  • \n
  • advocate Seata technology beyond GitHub;
  • \n
  • write blogs on Seata and so on.
  • \n
\n

Code Style

\n

The Seata code style complies with the Alibaba Java Coding Guidelines.

\n

Guidelines

\n

Alibaba-Java-Coding-Guidelines

\n

IDE Plugin Install(not necessary)

\n

Installing the plugin is not necessary, but it can help you find problems while you are coding.

\n

idea IDE

\n

p3c-idea-plugin-install

\n

eclipse IDE

\n

p3c-eclipse-plugin-install

\n

In a word, ANY HELP IS CONTRIBUTION.

\n", - "link": "/en-us/docs/developers/guide_dev.html", - "meta": { - "title": "Contributing to Seata", - "keywords": "Seata", - "description": "It is warmly welcomed if you have interest to hack on Seata. First, we encourage this kind of willing very much. And here is a list of contributing guide for you." - } -} \ No newline at end of file diff --git a/en-us/docs/developers/maintainers.html b/en-us/docs/developers/maintainers.html deleted file mode 100644 index 81dd4d6a..00000000 --- a/en-us/docs/developers/maintainers.html +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - Seata Maintainers - - - - -
Documentation

Seata Maintainers

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
GitHub ID | Name | Email | Company
allencloud | Allen Sun | allensun.shl@alibaba-inc.com | Alibaba Group
chenchaobing | Chaobing Chen | chenchaobing@126.com | Meitu
garfield009 | Zuozheng Hu | zuozheng.hzz@alibaba-inc.com | Alibaba Group
lowzj | Jin Zhang | zj3142063@gmail.com | Alibaba Group
wangj998 | Jian Wang | mingzhi.wj@alibaba-inc.com | Alibaba Group
zhouhaibing089 | Haibing Zhou | zhouhaibing089@gmail.com | eBay
-
- - - - - - diff --git a/en-us/docs/developers/maintainers.json b/en-us/docs/developers/maintainers.json deleted file mode 100644 index b017b2f7..00000000 --- a/en-us/docs/developers/maintainers.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "maintainers.md", - "__html": "

Seata Maintainers

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
GitHub ID | Name | Email | Company
allencloud | Allen Sun | allensun.shl@alibaba-inc.com | Alibaba Group
chenchaobing | Chaobing Chen | chenchaobing@126.com | Meitu
garfield009 | Zuozheng Hu | zuozheng.hzz@alibaba-inc.com | Alibaba Group
lowzj | Jin Zhang | zj3142063@gmail.com | Alibaba Group
wangj998 | Jian Wang | mingzhi.wj@alibaba-inc.com | Alibaba Group
zhouhaibing089 | Haibing Zhou | zhouhaibing089@gmail.com | eBay
\n", - "link": "/en-us/docs/developers/maintainers.html", - "meta": { - "title": "Seata Maintainers", - "keywords": "Seata, Maintainers", - "description": "A list of Seata maintainers" - } -} \ No newline at end of file diff --git a/en-us/docs/faq.html b/en-us/docs/faq.html deleted file mode 100644 index 789001f2..00000000 --- a/en-us/docs/faq.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - faq - - - - -
Documentation

FAQ

-

TBD

-
- - - - - - diff --git a/en-us/docs/faq.json b/en-us/docs/faq.json deleted file mode 100644 index e144e319..00000000 --- a/en-us/docs/faq.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

FAQ

\n

TBD

\n", - "link": "/en-us/docs/faq.html", - "meta": {} -} \ No newline at end of file diff --git a/en-us/docs/faq/faq.html b/en-us/docs/faq/faq.html deleted file mode 100644 index a89f718e..00000000 --- a/en-us/docs/faq/faq.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - faq - - - - -
Documentation

FAQ

-

java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)

-
When the undo log serialization is configured as jackson, the jackson version needs to be 2.9.9+
-
-
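If you are not sure which jackson-databind version actually ends up on the classpath, one way to check at runtime is a small sketch like the following (not part of Seata; it only relies on jackson-databind's own PackageVersion class):

// Prints the jackson-databind version found on the classpath; it should be 2.9.9 or higher
// when the undo log serialization is configured as jackson.
import com.fasterxml.jackson.databind.cfg.PackageVersion;

public class JacksonVersionCheck {
    public static void main(String[] args) {
        System.out.println("jackson-databind version: " + PackageVersion.VERSION);
    }
}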
- - - - - - diff --git a/en-us/docs/faq/faq.json b/en-us/docs/faq/faq.json deleted file mode 100644 index d8ce03be..00000000 --- a/en-us/docs/faq/faq.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

FAQ

\n

java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)

\n
When the undo log serialization is configured as jackson, the jackson version needs to be 2.9.9+\n
\n", - "link": "/en-us/docs/faq/faq.html", - "meta": {} -} \ No newline at end of file diff --git a/en-us/docs/ops/deploy-by-docker.html b/en-us/docs/ops/deploy-by-docker.html deleted file mode 100644 index 22961d3b..00000000 --- a/en-us/docs/ops/deploy-by-docker.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - - Deploy Seata Server By Docker - - - - -
Documentation

Deploy Seata Server By Docker

-

Quick Start

-

Start a seata-server instance

-
$ docker run --name seata-server -p 8091:8091 seataio/seata-server:latest
-
-

Use custom configuration file

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \
-        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \
-        seataio/seata-server
-
-

Specify server IP

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_IP=192.168.1.1 \
-        seataio/seata-server
-
-

Docker compose

-

Example of docker-compose.yaml

-
version: "3"
-services:
-  seata-server:
-    image: seataio/seata-server
-    hostname: seata-server
-    ports:
-      - "8091:8091"
-    environment:
-      - SEATA_PORT=8091
-      - STORE_MODE=file
-
-

Container shell access and viewing logs

-
$ docker exec -it seata-server sh
-
-
$ tail -f /root/logs/seata/seata-server.log
-
-

Using custom configuration file

-

The default configuration can be found under the path /seata-server/resources; we suggest putting your custom configuration under other directories. The environment variable SEATA_CONFIG_NAME is required when using a custom configuration, and its value must start with file:, like file:/root/seata-config/registry:

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \
-        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \
-        seataio/seata-server
-
-

Environment Variables

-

You can modify configuration of seata-server by the environment variables like this:

-
    -
  • SEATA_IP
  • -
-
-

The variable is optional, specifies registry IP instead of the container IP in registry center like eureka or others.

-
-
    -
  • SEATA_PORT
  • -
-
-

The variable is optional, specifies seata-server port, default is 8091

-
-
    -
  • STORE_MODE
  • -
-
-

The variable is optional, specifies the log store mode of seata-server, support db and file, default is file.

-
-
    -
  • SERVER_NODE
  • -
-
-

The variable is optional, specifies the seata-server node ID, like 1,2,3..., default is 1

-
-
    -
  • SEATA_ENV
  • -
-
-

The variable is optional, specifies the seata-server environment, like dev, test etc. Then server will find file like registry-dev.conf under the configuration path when start.

-
-
    -
  • SEATA_CONFIG_NAME
  • -
-
-

The variable is optional, specifies the configuration file path, like the file:/root/registry, will load file/root/registry.conf as configuration.

-
-
- - - - - - - diff --git a/en-us/docs/ops/deploy-by-docker.json b/en-us/docs/ops/deploy-by-docker.json deleted file mode 100644 index 3493e97d..00000000 --- a/en-us/docs/ops/deploy-by-docker.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-docker.md", - "__html": "

Deploy Seata Server By Docker

\n

Quick Start

\n

Start a seata-server instance

\n
$ docker run --name seata-server -p 8091:8091 seataio/seata-server:latest\n
\n

Use custom configuration file

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \\\n        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \\\n        seataio/seata-server\n
\n

Specify server IP

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_IP=192.168.1.1 \\\n        seataio/seata-server\n
\n

Docker compose

\n

Example of docker-compose.yaml

\n
version: \"3\"\nservices:\n  seata-server:\n    image: seataio/seata-server\n    hostname: seata-server\n    ports:\n      - \"8091:8091\"\n    environment:\n      - SEATA_PORT=8091\n      - STORE_MODE=file\n
\n

Container shell access and viewing logs

\n
$ docker exec -it seata-server sh\n
\n
$ tail -f /root/logs/seata/seata-server.log\n
\n

Using custom configuration file

\n

The default configuration can be found under the path /seata-server/resources; we suggest putting your custom configuration under other directories. The environment variable SEATA_CONFIG_NAME is required when using a custom configuration, and its value must start with file:, like file:/root/seata-config/registry:

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \\\n        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \\\n        seataio/seata-server\n
\n

Environment Variables

\n

You can modify configuration of seata-server by the environment variables like this:

\n
    \n
  • SEATA_IP
  • \n
\n
\n

The variable is optional, specifies registry IP instead of the container IP in registry center like eureka or others.

\n
\n
    \n
  • SEATA_PORT
  • \n
\n
\n

The variable is optional, specifies seata-server port, default is 8091

\n
\n
    \n
  • STORE_MODE
  • \n
\n
\n

The variable is optional, specifies the log store mode of seata-server, support db and file, default is file.

\n
\n
    \n
  • SERVER_NODE
  • \n
\n
\n

The variable is optional, specifies the seata-server node ID, like 1,2,3..., default is 1

\n
\n
    \n
  • SEATA_ENV
  • \n
\n
\n

The variable is optional, specifies the seata-server environment, like dev, test etc. Then server will find file like registry-dev.conf under the configuration path when start.

\n
\n
    \n
  • SEATA_CONFIG_NAME
  • \n
\n
\n

The variable is optional, specifies the configuration file path, like the file:/root/registry, will load file/root/registry.conf as configuration.

\n
\n", - "link": "/en-us/docs/ops/deploy-by-docker.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Docker", - "keywords": "docker", - "description": "Deploy Seata Server By Docker", - "author": "helloworlde", - "date": "2019-11-25" - } -} \ No newline at end of file diff --git a/en-us/docs/ops/deploy-by-helm.html b/en-us/docs/ops/deploy-by-helm.html deleted file mode 100644 index 40bb6710..00000000 --- a/en-us/docs/ops/deploy-by-helm.html +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - Deploy Seata Server By Helm - - - - -
Documentation

Deploy Seata Server By Helm

-

Quick start

-
$ cd ./script/server/helm/seata-server
-$ helm install seata-server ./seata-server
-
-

Custom configuration

-

Environment

-

The environment variables are the same as with Docker; see Deploy Seata Server By Docker.

-

Use specify configuration file

-

You can specify a configuration file by mounting files, e.g. mounting the files under /root/workspace/seata/seata-config/file into the pod. You also need to specify the environment variable SEATA_CONFIG_NAME, and its value must start with file:, like file:/root/seata-config/registry

-
    -
  • Values.yaml
  • -
-
replicaCount: 1
-
-namespace: default
-
-image:
-  repository: seataio/seata-server
-  tag: latest
-  pullPolicy: IfNotPresent
-
-service:
-  type: NodePort
-  port: 30091
-
-env:
-  seataPort: "8091"
-  storeMode: "file"
-  seataIp: "127.0.0.1"
-  seataConfigName: "file:/root/seata-config/registry"
-
-volume:
-  - name: seata-config
-    mountPath: /root/seata-config
-    hostPath: /root/workspace/seata/seata-config/file
-
-
- - - - - - - diff --git a/en-us/docs/ops/deploy-by-helm.json b/en-us/docs/ops/deploy-by-helm.json deleted file mode 100644 index 869ed431..00000000 --- a/en-us/docs/ops/deploy-by-helm.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-helm.md", - "__html": "

Deploy Seata Server By Helm

\n

Quick start

\n
$ cd ./script/server/helm/seata-server\n$ helm install seata-server ./seata-server\n
\n

Custom configuration

\n

Environment

\n

The environment variables are the same as with Docker; see Deploy Seata Server By Docker.

\n

Use specify configuration file

\n

You can specify a configuration file by mounting files, e.g. mounting the files under /root/workspace/seata/seata-config/file into the pod. You also need to specify the environment variable SEATA_CONFIG_NAME, and its value must start with file:, like file:/root/seata-config/registry

\n
    \n
  • Values.yaml
  • \n
\n
replicaCount: 1\n\nnamespace: default\n\nimage:\n  repository: seataio/seata-server\n  tag: latest\n  pullPolicy: IfNotPresent\n\nservice:\n  type: NodePort\n  port: 30091\n\nenv:\n  seataPort: \"8091\"\n  storeMode: \"file\"\n  seataIp: \"127.0.0.1\"\n  seataConfigName: \"file:/root/seata-config/registry\"\n\nvolume:\n  - name: seata-config\n    mountPath: /root/seata-config\n    hostPath: /root/workspace/seata/seata-config/file\n
\n", - "link": "/en-us/docs/ops/deploy-by-helm.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Helm", - "keywords": "kubernetes,helm,ops", - "description": "Deploy Seata Server By Helm", - "author": "helloworlde", - "date": "2019-12-01" - } -} \ No newline at end of file diff --git a/en-us/docs/ops/deploy-by-kubernetes.html b/en-us/docs/ops/deploy-by-kubernetes.html deleted file mode 100644 index 088bfe42..00000000 --- a/en-us/docs/ops/deploy-by-kubernetes.html +++ /dev/null @@ -1,135 +0,0 @@ - - - - - - - - - - Deploy Seata Server By Kubernetes - - - - -
Documentation

Deploy Seata Server By Kubernetes

-

Quick Start

-

Create file seata-server.yaml

-
apiVersion: v1
-kind: Service
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  type: NodePort
-  ports:
-    - port: 8091
-      nodePort: 30091
-      protocol: TCP
-      name: http
-  selector:
-    k8s-app: seata-server
-
----
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      k8s-app: seata-server
-  template:
-    metadata:
-      labels:
-        k8s-app: seata-server
-    spec:
-      containers:
-        - name: seata-server
-          image: docker.io/seataio/seata-server:latest
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: SEATA_PORT
-              value: "8091"
-            - name: STORE_MODE
-              value: file
-          ports:
-            - name: http
-              containerPort: 8091
-              protocol: TCP
-
-
$ kubectl apply -f seata-server.yaml
-
-

Custom configuration

-

Environment

-

The environment variables are the same as with Docker; see Deploy Seata Server By Docker.

-

Use specify configuration file

-

You can specify a configuration file by mounting files, e.g. mounting the files under /root/workspace/seata/seata-config/file into the pod. You also need to specify the environment variable SEATA_CONFIG_NAME, and its value must start with file:, like file:/root/seata-config/registry

-
    -
  • Deployment
  • -
-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      k8s-app: seata-server
-  template:
-    metadata:
-      labels:
-        k8s-app: seata-server
-    spec:
-      containers:
-        - name: seata-server
-          image: docker.io/seataio/seata-server:latest
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: SEATA_PORT
-              value: "8091"
-            - name: STORE_MODE
-              value: file
-            - name: SEATA_CONFIG_NAME
-              value: file:/root/seata-config/registry
-          ports:
-            - name: http
-              containerPort: 8091
-              protocol: TCP
-          volumeMounts:
-            - name: seata-config
-              mountPath: /root/seata-config
-      volumes:
-        - name: seata-config
-          hostPath:
-            path: /root/workspace/seata/seata-config/file
-
-
- - - - - - - diff --git a/en-us/docs/ops/deploy-by-kubernetes.json b/en-us/docs/ops/deploy-by-kubernetes.json deleted file mode 100644 index c157bce3..00000000 --- a/en-us/docs/ops/deploy-by-kubernetes.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-kubernetes.md", - "__html": "

Deploy Seata Server By Kubernetes

\n

Quick Start

\n

Create file seata-server.yaml

\n
apiVersion: v1\nkind: Service\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  type: NodePort\n  ports:\n    - port: 8091\n      nodePort: 30091\n      protocol: TCP\n      name: http\n  selector:\n    k8s-app: seata-server\n\n---\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: seata-server\n  template:\n    metadata:\n      labels:\n        k8s-app: seata-server\n    spec:\n      containers:\n        - name: seata-server\n          image: docker.io/seataio/seata-server:latest\n          imagePullPolicy: IfNotPresent\n          env:\n            - name: SEATA_PORT\n              value: \"8091\"\n            - name: STORE_MODE\n              value: file\n          ports:\n            - name: http\n              containerPort: 8091\n              protocol: TCP\n
\n
$ kubectl apply -f seata-server.yaml\n
\n

Custom configuration

\n

Environment

\n

The environment variables are the same as with Docker; see Deploy Seata Server By Docker.

\n

Use specify configuration file

\n

You can specify a configuration file by mounting files, e.g. mounting the files under /root/workspace/seata/seata-config/file into the pod. You also need to specify the environment variable SEATA_CONFIG_NAME, and its value must start with file:, like file:/root/seata-config/registry

\n
    \n
  • Deployment
  • \n
\n
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: seata-server\n  template:\n    metadata:\n      labels:\n        k8s-app: seata-server\n    spec:\n      containers:\n        - name: seata-server\n          image: docker.io/seataio/seata-server:latest\n          imagePullPolicy: IfNotPresent\n          env:\n            - name: SEATA_PORT\n              value: \"8091\"\n            - name: STORE_MODE\n              value: file\n            - name: SEATA_CONFIG_NAME\n              value: file:/root/seata-config/registry\n          ports:\n            - name: http\n              containerPort: 8091\n              protocol: TCP\n          volumeMounts:\n            - name: seata-config\n              mountPath: /root/seata-config\n      volumes:\n        - name: seata-config\n          hostPath:\n            path: /root/workspace/seata/seata-config/file\n
\n", - "link": "/en-us/docs/ops/deploy-by-kubernetes.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Kubernetes", - "keywords": "kubernetes,ops", - "description": "Deploy Seata Server By Kubernetes", - "author": "helloworlde", - "date": "2019-12-01" - } -} \ No newline at end of file diff --git a/en-us/docs/ops/deploy-server.html b/en-us/docs/ops/deploy-server.html deleted file mode 100644 index ffa4a57b..00000000 --- a/en-us/docs/ops/deploy-server.html +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - Deploy Server - - - - -
Documentation

Deploy Server

-

The server can be deployed by multiple methods: directly, Docker, Docker-Compose, Kubernetes, or Helm.

-

Directly

-
    -
  1. -

    Download the server application from RELEASE and unzip.

    -
  2. -
  3. -

    Startup

    -
  4. -
-

On Linux/Mac

-
$ sh ./bin/seata-server.sh
-
-

On Windows

-
bin\seata-server.bat
-
-

Arguments

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Argument | Fullname | Effect | Comment
-h | --host | Specify the IP in the registry center | Suggest specifying the virtual machine or cloud server IP, otherwise the internal IP will be used
-p | --port | Specify the startup port | Default is 8091
-m | --storeMode | The way to save the transaction log | Supports file and db, default is file
-n | --serverNode | Specify the seata-server node ID | Like 1,2,3..., default is 1
-e | --seataEnv | Specify the environment of seata-server | Like dev, test etc. Then a file like registry-dev.conf will be used as the configuration
-

For example:

-
$ sh ./bin/seata-server.sh -p 8091 -h 127.0.0.1 -m file
-
-

Deploy in container

-

The following methods are now supported:

- -
- - - - - - - diff --git a/en-us/docs/ops/deploy-server.json b/en-us/docs/ops/deploy-server.json deleted file mode 100644 index 48f85ffe..00000000 --- a/en-us/docs/ops/deploy-server.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "deploy-server.md", - "__html": "

Deploy Server

\n

The server can be deployed by multiple methods: directly, Docker, Docker-Compose, Kubernetes, or Helm.

\n

Directly

\n
    \n
  1. \n

    Download the server application from RELEASE and unzip.

    \n
  2. \n
  3. \n

    Startup

    \n
  4. \n
\n

On Linux/Mac

\n
$ sh ./bin/seata-server.sh\n
\n

On Windows

\n
bin\\seata-server.bat\n
\n

Arguments

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Argument | Fullname | Effect | Comment
-h | --host | Specify the IP in the registry center | Suggest specifying the virtual machine or cloud server IP, otherwise the internal IP will be used
-p | --port | Specify the startup port | Default is 8091
-m | --storeMode | The way to save the transaction log | Supports file and db, default is file
-n | --serverNode | Specify the seata-server node ID | Like 1,2,3..., default is 1
-e | --seataEnv | Specify the environment of seata-server | Like dev, test etc. Then a file like registry-dev.conf will be used as the configuration
\n

For example:

\n
$ sh ./bin/seata-server.sh -p 8091 -h 127.0.0.1 -m file\n
\n

Deploy in container

\n

The following methods are now supported:

\n\n", - "link": "/en-us/docs/ops/deploy-server.html", - "meta": { - "title": "Deploy Server", - "keywords": "Seata", - "description": "The server can deploy by multiple method: Directly, Docker, Docker-Compose, Kubernetes, Helm." - } -} \ No newline at end of file diff --git a/en-us/docs/ops/multi-configuration-isolation.html b/en-us/docs/ops/multi-configuration-isolation.html deleted file mode 100644 index 776a45f0..00000000 --- a/en-us/docs/ops/multi-configuration-isolation.html +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - Multi-configuration Isolation - - - - -
Documentation

Multi-configuration Isolation

-

Seata has supported multi-configuration isolation since 0.6.1. You can configure it in the following steps.

-

use case

-

Suppose we now have a test environment in which we want to read only the configuration items corresponding to the test environment.

-

1.Environment Configuration

-

Seata provides two ways to set up different environments:

-
    -
  • -Denv=test,where test is the name of the environment.
  • -
-

-e.g.(Linux)
-
-sh seata-server.sh -Denv=test
-
-
    -
  • Use SEATA_CONFIG_ENV as the environment variable key, and its value will be the name of the environment. [recommended]
  • -
-

-e.g.(Linux)
-
-#vi /etc/profile 
-
-export SEATA_CONFIG_ENV=test
-
-:wq
-
-#source /etc/profile
-
-

2.Name the new configuration file

-
    -
  • Rename file.conf to file-env.conf,where env is the name of the environment. e.g. file-test.conf
  • -
  • Rename registry.conf to registry-env.conf,where env is the name of the environment. e.g. registry-test.conf
  • -
-

After all the steps have been set up, you can start using Seata configuration isolation.

-
- - - - - - - diff --git a/en-us/docs/ops/multi-configuration-isolation.json b/en-us/docs/ops/multi-configuration-isolation.json deleted file mode 100644 index 4892dbbc..00000000 --- a/en-us/docs/ops/multi-configuration-isolation.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "multi-configuration-isolation.md", - "__html": "

Multi-configuration Isolation

\n

Seata has supported multi-configuration isolation since 0.6.1. You can configure it in the following steps.

\n

use case

\n

Suppose we now have a test environment in which we want to read only the configuration items corresponding to the test environment.

\n

1.Environment Configuration

\n

Seata provides two ways to set up different environments:

\n
    \n
  • -Denv=test,where test is the name of the environment.
  • \n
\n
\ne.g.(Linux)\n\nsh seata-server.sh -Denv=test\n
\n
    \n
  • Use SEATA_CONFIG_ENV as the environment variable key, and its value will be the name of the environment. [recommended]
  • \n
\n
\ne.g.(Linux)\n\n#vi /etc/profile \n\nexport SEATA_CONFIG_ENV=test\n\n:wq\n\n#source /etc/profile\n
\n

2.Name the new configuration file

\n
    \n
  • Rename file.conf to file-env.conf,where env is the name of the environment. e.g. file-test.conf
  • \n
  • Rename registry.conf to registry-env.conf,where env is the name of the environment. e.g. registry-test.conf
  • \n
\n

After all the steps have been set up, you can start using Seata configuration isolation.

\n", - "link": "/en-us/docs/ops/multi-configuration-isolation.html", - "meta": { - "title": "Multi-configuration Isolation", - "keywords": "Seata", - "description": "Seata supports Multi-configuration Isolation since 0.6.1,You can configure it in the following steps." - } -} \ No newline at end of file diff --git a/en-us/docs/overview/faq.html b/en-us/docs/overview/faq.html deleted file mode 100644 index 90b9e33d..00000000 --- a/en-us/docs/overview/faq.html +++ /dev/null @@ -1,137 +0,0 @@ - - - - - - - - - - Seata FAQ - - - - -
Documentation

FAQ

-

1.Can Seata be used in a production environment?

-

2. Does Seata support high availability?

-

3. What is the use of the record with log_status = 1 in the undo_log table?

-

4.How to use the Seata framework to ensure transaction isolation?

-

5. When rolling back dirty data fails, what shall I do?

-

6.Why the global transaction state is not "begin" when a branch transaction is registered ?

-

7. When Nacos is used as the Seata configuration center, the project reports at startup that the service cannot be found. How to check and deal with it?

-

8.When Eureka is the registry and TC is highly available, how to overwrite Eureka properties at the TC end?

-

9.java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

-

10. Why didn't my mybatis operation return auto-generated ID?

-
-

Q: 1.Can Seata be used in a production environment?

-

A: Since version 0.4.2, it has been supported in production environments. Users who are using Seata are welcome to register in this issue: who's using Seata

-
-

Q: 2. Does Seata support high availability?

-

A: Supported since version 0.6: TC uses db mode to share global transaction session information, and the registry uses a non-file, Seata-supported third-party registry

-
-

Q: 3. What is the use of the record with log_status = 1 in the undo_log table?

-

A:

-

Scenario: after branch transaction A registers with TC, a global transaction rollback occurs before A's local transaction commits

-

Consequence: the global transaction rollback succeeds, but A's resource stays occupied, resulting in a resource suspension problem

-

Anti-suspension measure: when A rolls back and finds that the undo record has not been inserted yet, it inserts an undo record with log_status = 1. When A's local transaction (the business write SQL and the corresponding undo record form one local transaction) is committed later, it fails due to the primary key conflict in the undo_log table.

-
-

Q: 4.How to use the Seata framework to ensure transaction isolation?

-

A: Since Seata's phase-one local transactions have already been committed, enhanced isolation is needed to prevent other transactions from dirty reads and dirty writes.

-
    -
  1. Dirty read: use a select statement with for update, and proxy the method with @GlobalLock or @GlobalTransactional.
  2. -
  3. Dirty write: you must use @GlobalTransactional. Note: if the business interface you are querying does not use the @GlobalTransactional annotation, meaning the method does not need a distributed transaction, you can annotate the method with @GlobalLock and add a for update clause to the query (see the sketch after this list). If your query interface already has @GlobalTransactional on the outer edge of the transaction link, you can simply add a for update clause to your query. The reason for designing the @GlobalLock annotation is that, before it was available, a business that only needed to query committed data still had to open a distributed transaction. Using the @GlobalTransactional annotation adds some unnecessary extra RPC overhead such as begin (returning the xid), commit, etc.; @GlobalLock simplifies the RPC process for higher performance.
  4. -
-
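For illustration, a minimal sketch of the @GlobalLock option described above, assuming a Spring service and a hypothetical data-access interface whose query ends with FOR UPDATE. Account, AccountMapper and AccountService are invented names; only the @GlobalLock and @Transactional annotations are real.

import io.seata.spring.annotation.GlobalLock;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

public class GlobalLockExample {

    // Illustrative entity, not a Seata class.
    public static class Account {
        public Long id;
        public long balance;
    }

    // Hypothetical DAO; its SQL is assumed to end with FOR UPDATE,
    // e.g. SELECT * FROM account WHERE id = ? FOR UPDATE.
    public interface AccountMapper {
        Account selectByIdForUpdate(Long id);
    }

    @Service
    public static class AccountService {

        private final AccountMapper accountMapper;

        public AccountService(AccountMapper accountMapper) {
            this.accountMapper = accountMapper;
        }

        // The method itself does not need a distributed transaction, so @GlobalLock plus
        // SELECT ... FOR UPDATE is enough to avoid reading rows still locked by a Seata
        // global transaction, without the extra RPC cost of @GlobalTransactional.
        @GlobalLock
        @Transactional
        public Account queryForUpdate(Long accountId) {
            return accountMapper.selectByIdForUpdate(accountId);
        }
    }
}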
-

Q: 5. When rolling back dirty data fails, what shall I do?

-

A:

-
    -
  1. The dirty data needs to be processed manually, and the data can be corrected according to the log prompt, or the corresponding undo can be deleted (the FailureHandler can be customized for email notification or other purposes).
  2. -
  3. Turning off undo image validation during rollback is an option, but it is not recommended. Note: it is recommended to isolate the dirty data in advance.
  4. -
-
-

Q: 6.Why the global transaction state is not "begin" when a branch transaction is registered ?

-

A:

-

Exception: Could not register branch into global session xid = status = Rollbacked (or another two-phase state such as Rollbacking, AsyncCommitting, etc.) while expecting Begin

-

Description: when a branch transaction is registered, the global transaction status must be the one-phase state "begin"; registration in any state other than "begin" is not allowed. This is normal processing at the Seata framework level, and users can solve it at their own business level.

-

This exception can occur in the following situations (you can continue to add).

-
    -
  1. The branch transaction is asynchronous. The global transaction is not aware of its progress. The global transaction has entered phase 2 before the asynchronous branch comes to register.
  2. -
  3. Service A calls service B over RPC and the call times out (Dubbo, Feign, etc. default to a 1-second timeout); A throws an exception to the TM, the TM tells the TC to roll back, but B still receives the request (because of network delay or an RPC framework retry), and when B registers with the TC it finds the global transaction already rolling back.
  4. -
  5. The TC detects that the global transaction has timed out (@GlobalTransactional(timeoutMills = ...), default 60 seconds), actively changes its state and notifies each branch transaction to roll back, so a branch registering after that point is rejected; see the example after this list for raising the timeout.
  6. -
-
-
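For case 3, a hedged one-line illustration of raising the timeout on the initiating method so that slow branches can still register while the transaction is in the "begin" state; the class and method names are hypothetical:

import io.seata.spring.annotation.GlobalTransactional;

public class OrderService {

    // Raise the global transaction timeout from the 60-second default to 5 minutes.
    @GlobalTransactional(timeoutMills = 300000, name = "create-order")
    public void createOrder() {
        // ... calls to branch services
    }
}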

Q: 7. When Nacos is used as the Seata configuration center and project startup reports that the service cannot be found, how do I check and deal with it?

-

A: Exception: io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server. Check the following:

-
    -
  1. Check the Nacos configuration list to see whether the Seata configuration has been imported successfully.
  2. -
  3. Check the Nacos service list to see whether serverAddr has been registered successfully.
  4. -
  5. Check the namespace: registry.nacos.namespace and config.nacos.namespace in the client's registry.conf should be set to the Nacos namespace ID (the default is "", which maps to the reserved public namespace of Nacos), and the server and client must use the same namespace. If you create your own namespace, do not reuse the name public; use a name with specific meaning for your actual business scenario.
  6. -
  7. In the Nacos service list, the IP address registered under serverAddr should be the IP specified when starting the Seata server, for example: sh seata-server.sh -p 8091 -h 122.51.204.197 -m file.
  8. -
  9. Check that the transaction group in seata/conf/nacos-config.txt (for example service.vgroup_mapping.trade_group=default) matches the transaction group name configured in the project.
  10. -
  11. Telnet the IP and port to verify that the port is reachable, and check the firewall status. Note: 1. Version 0.8.0 has a problem when starting with a specified IP, raising "Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot assign request address"; please upgrade to version 0.8.1 or above. 2. If the project uses JDK 13, startup fails with:
  12. -
-
   Error: Could not create the Java Virtual Machine
-   Error: A fatal exception has occurred. Program will exit.
-
-

If the environment is sh, replace the last section of the script with:

-
        exec "$JAVACMD" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:
-MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="$BASEDIR"/logs
-/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+
-UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \
-          -classpath "$CLASSPATH" \
-          -Dapp.name="seata-server" \
-          -Dapp.pid="$$" \
-          -Dapp.repo="$REPO" \
-          -Dapp.home="$BASEDIR" \
-          -Dbasedir="$BASEDIR" \
-          io.seata.server.Server \
-          "$@"
-
-
-

Q: 8. When Eureka is the registry and the TC is deployed for high availability, how do I override Eureka properties on the TC side?

-

A: Add an eureka-client.properties file in the seata\conf directory and put the Eureka properties to be overridden in it. For example, to override eureka.instance.lease-renewal-interval-in-seconds and eureka.instance.lease-expiration-duration-in-seconds, add the following:

-
eureka.lease.renewalInterval=1  
-eureka.lease.duration=2
-
-

The property prefix is eureka; for the rest of the property names, refer to the class com.netflix.appinfo.PropertyBasedInstanceConfigConstants. You can also study the seata-discovery-eureka project in the discovery module of the Seata source code.

-
-

Q: 9. What's the reason for java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

-

A: When the undo log serialization is configured as Jackson, the Jackson version needs to be 2.9.9+.

-
-

Q: 10. Why didn't my MyBatis operation return the auto-generated ID?

-

A: You should update the MyBatis configuration: add the annotation @Options(useGeneratedKeys = true, keyProperty = "id") to the mapper method, or set useGeneratedKeys and keyProperty in the MyBatis XML mapper, as in the hedged sketch below.
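A hedged sketch of the annotation-based variant; @Insert, @Mapper, and @Options are standard MyBatis annotations, while the Product type, table, and columns are illustrative only:

import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Options;

// Hypothetical POJO with an "id" property to receive the generated key.
class Product { Long id; String name; String since; }

@Mapper
public interface ProductMapper {

    // After the insert, MyBatis writes the database-generated key back into product.id.
    @Insert("INSERT INTO product(name, since) VALUES(#{name}, #{since})")
    @Options(useGeneratedKeys = true, keyProperty = "id")
    int insert(Product product);
}

In the XML variant, the equivalent is setting useGeneratedKeys="true" and keyProperty="id" on the insert element.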

-
-
- - - - - - - diff --git a/en-us/docs/overview/faq.json b/en-us/docs/overview/faq.json deleted file mode 100644 index ae1aa58c..00000000 --- a/en-us/docs/overview/faq.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

FAQ

\n

1.Can Seata be used in a production environment?

\n

2.Does Seata support high availability ?

\n

3.What is the use of the record of undo log table log status = 1 ?

\n

4.How to use the Seata framework to ensure transaction isolation?

\n

5.When Failed to roll back dirty data, what shall I do ?

\n

6.Why the global transaction state is not "begin" when a branch transaction is registered ?

\n

7.When Nacos is used as the Seata configuration center, the project startup error report cannot find the service. How to check and deal with it ?

\n

8.When Eureka is the registry and TC is highly available, how to overwrite Eureka properties at the TC end?

\n

9.java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

\n

10. Why didn't my mybatis operation return auto-generated ID?

\n
\n

Q: 1.Can Seata be used in a production environment?

\n

A:\nSince version 0.4.2,it is supported in production environment,Users who are using seata are welcome to complete this issue together:who's using Seata

\n
\n

Q: 2.Does Seata support high availability ?

\n

A:\nsupported from version 0.6, tc USES db mode to share global transaction session information, and the registry USES non-file seata-supported third-party registries

\n
\n

Q: 3.What is the use of the record of undo log table log status = 1 ?

\n

A:

\n

Scenario: after A branch transaction A registers TC, A global transaction rollback occurs before A local transaction commits

\n

Consequence: global transaction rollback succeeds, a resource is occupied, resulting in resource suspension problem

\n

Anti-suspension measures: when a rolls back and finds that the rollback undo has not been inserted, an undo record with log_status=1 is inserted. When a local transaction (business write operation SQL and corresponding undo are a local transaction) is committed, it fails due to the primary key conflict of the undo table.

\n
\n

Q: 4.How to use the Seata framework to ensure transaction isolation?

\n

A:\nSince seata phase 1 local transactions have been committed, enhanced isolation is needed to prevent other transactions from dirty reads and dirty writes.

\n
    \n
  1. Dirty read Select statement with for update, proxy method with @GlobalLock or @GlobalTransaction
  2. \n
  3. Dirty write You must use @globaltransaction\nnote:If the interface of the business you are querying does not use the @globaltransactional annotation, which means there is no need for distributed transactions on the method, you can annotate the @globallock annotation on the method and add a for update statement to the query.\nIf your query interface has the @globaltransactional annotation on the outer edge of the transactional link, you can simply add a for update statement to your query. The reason for designing this annotation is that before it is available, distributed transactions need to query the committed data, but the business does not need distributed transactions.\nUsing the GlobalTransactional annotation adds some unnecessary additional RPC overhead such as begin returning xid, commit transaction, etc. GlobalLock simplifies the RPC process for higher performance.
  4. \n
\n
\n

Q: 5.When Failed to roll back dirty data, what shall I do ?

\n

A:

\n
    \n
  1. The dirty data needs to be processed manually, and the data can be corrected according to the log prompt, or the corresponding undo can be deleted (the FailureHandler can be customized for email notification or other purposes).
  2. \n
  3. This option is not recommended when "undo" mirror validation is turned off during rollback.\nnode:It is recommended to isolate the dirty data in advance
  4. \n
\n
\n

Q: 6.Why the global transaction state is not \"begin\" when a branch transaction is registered ?

\n

A:

\n

abnormal: Could not register branch into global session xid = status = Rollbacked(Two phase state and Rollbacking, AsyncCommitting, etc) while expecting Begin

\n

describe: When a branch transaction is registered, the global transaction status must be a one-phase state "begin", and registration other than "begin" is not allowed. It belongs to the normal processing at the seata framework level, and users can solve it from their own business level.

\n

This exception can occur in the following situations (you can continue to add).

\n
    \n
  1. The branch transaction is asynchronous. The global transaction is not aware of its progress. The global transaction has entered phase 2 before the asynchronous branch comes to register.
  2. \n
  3. Service a rpc service b timed out (dubbo, feign, etc. timeout by default for 1 second), a throws an exception to tm, tm informs tc to roll back, but b still receives the request (network delay or rpc framework retry), and then registers at tc Global transaction was found to be rolling back.
  4. \n
  5. Tc is aware of the global transaction timeout (@globaltransactional (timeoutMills = default 60 seconds)), actively changes the state and notifies each branch transaction to rollback when a new branch transaction is registered.
  6. \n
\n
\n

Q: 7.When Nacos is used as the Seata configuration center, the project startup error report cannot find the service. How to check and deal with it ?

\n

A:\nabnormal:io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server.

\n
    \n
  1. Check the nacos configuration list to see if the seata configuration has been imported successfully.
  2. \n
  3. Check the list of nacos services to see if serverAddr has been registered successfully.
  4. \n
  5. Check the namespace, registry.nacos.namespace and config.nacos.namespace in the client's registry.conf and enter the nacos namespace ID. The default is "". The server and client correspond to the namespace.\nIt is public and is a reserved control of nacos. If you need to create your own namespace, it is better not to use the same name as public, and use a name that has specific semantics in an actual business scenario.
  6. \n
  7. For the list of services on nacos, the IP address corresponding to serverAddr address should be the IP address specified for seata startup, such as: sh seata-server.sh-p 8091-h 122.51.204.197-m file.
  8. \n
  9. Check to see if the seata/conf/nacos-config.txt, transaction group service.vgroup_mapping.trade_group=default configuration is the same as the project group configuration name.
  10. \n
  11. Telnet IP port view ports are open as well as firewall status.\nnote:1.Version 080 starts the specified IP problem, the exception "Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot assign request address", please upgrade to version 081 or above.\nThe project USES jdk13 and starts with
  12. \n
\n
   Error: Could not create the Java Virtual Machine\n   Error: A fatal exception has occurred. Program will exit.\n
\n

​ If the environment is sh, replace the last paragraph in the script:

\n
        exec \"$JAVACMD\" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:\nMaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\"$BASEDIR\"/logs\n/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+\nUseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \\\n          -classpath \"$CLASSPATH\" \\\n          -Dapp.name=\"seata-server\" \\\n          -Dapp.pid=\"$$\" \\\n          -Dapp.repo=\"$REPO\" \\\n          -Dapp.home=\"$BASEDIR\" \\\n          -Dbasedir=\"$BASEDIR\" \\\n          io.seata.server.Server \\\n          \"$@\"\n
\n
\n

Q: 8.When Eureka is the registry and TC is highly available, how to overwrite Eureka properties at the TC end?

\n

A:\nAdd the eureka-client.properties file in the seata\\conf directory and add the Eureka properties to be overwritten.\nFor example, to overwrite eureka.instance.lease-renewal-interval-in-seconds and eureka.instance.lease-expiration-duration-in-seconds, add the following:

\n
eureka.lease.renewalInterval=1  \neureka.lease.duration=2\n
\n

The attribute prefix is eureka, and the subsequent attribute names can refer to the class com.netflix.appinfo.PropertyBasedInstanceConfigConstants. You can also study the seata-discovery-eureka project of the discovery module in the seata source code.

\n
\n

Q: 9.What's the reason of java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;) ?

\n

A:\nwhen the undolog serialization is configured as Jackson, the Jackson version needs to be 2.9.9+

\n
\n

Q: 10. Why didn't my mybatis operation return auto-generated ID?

\n

A:\nYou should update the configuration of MyBatis: set annotation @Options(useGeneratedKeys = true, keyProperty = "id") or set the value of useGeneratedKeys and keyProperty in the MyBatis XML configuration

\n
\n", - "link": "/en-us/docs/overview/faq.html", - "meta": { - "title": "Seata FAQ", - "keywords": "Seata", - "description": "Seata FAQ." - } -} \ No newline at end of file diff --git a/en-us/docs/overview/terminology.html b/en-us/docs/overview/terminology.html deleted file mode 100644 index b8319bca..00000000 --- a/en-us/docs/overview/terminology.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - Seata Terminology - - - - -
Documentation

Seata Terminology

-

TC - Transaction Coordinator

-

Maintain status of global and branch transactions, drive the global commit or rollback.

-

TM - Transaction Manager

-

Define the scope of global transaction: begin a global transaction, commit or rollback a global transaction.

-

RM - Resource Manager

-

Manage the resources that branch transactions work on, talk to the TC to register branch transactions and report their status, and drive branch transaction commit or rollback.

-
- - - - - - - diff --git a/en-us/docs/overview/terminology.json b/en-us/docs/overview/terminology.json deleted file mode 100644 index f90e32d8..00000000 --- a/en-us/docs/overview/terminology.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "terminology.md", - "__html": "

Seata Terminology

\n

TC - Transaction Coordinator

\n

Maintain status of global and branch transactions, drive the global commit or rollback.

\n

TM - Transaction Manager

\n

Define the scope of global transaction: begin a global transaction, commit or rollback a global transaction.

\n

RM - Resource Manager

\n

Manage resources that branch transactions working on, talk to TC for registering branch transactions and reporting status of branch transactions, and drive the branch transaction commit or rollback.

\n", - "link": "/en-us/docs/overview/terminology.html", - "meta": { - "title": "Seata Terminology", - "keywords": "Seata", - "description": "Seata Terminology." - } -} \ No newline at end of file diff --git a/en-us/docs/overview/what-is-seata.html b/en-us/docs/overview/what-is-seata.html deleted file mode 100644 index 0b5844b1..00000000 --- a/en-us/docs/overview/what-is-seata.html +++ /dev/null @@ -1,318 +0,0 @@ - - - - - - - - - - What Is Seata - - - - -
Documentation

What Is Seata?

-

Seata is an open source distributed transaction solution dedicated to providing high performance and easy to use distributed transaction services. Seata will provide users with AT, TCC, SAGA, and XA transaction models to create a one-stop distributed solution for users.

-

AT Mode

-

Prerequisite

-
    -
  • Relational databases that support local ACID transactions.
  • -
  • Java applications that access the database via JDBC.
  • -
-

Overall mechanism

-

An evolution of the two-phase commit protocol:

-
    -
  • Phase 1: commit the business data and the rollback log in the same local transaction, then release the local lock and connection resources.
  • -
  • Phase 2: -
      -
    • for the commit case, do the work asynchronously and quickly.
    • -
    • for the rollback case, do compensation based on the rollback log created in phase 1.
    • -
    -
  • -
-

Write isolation

-
    -
  • The global lock must be acquired before committing the local transaction of phase 1.
  • -
  • If the global lock is not acquired, the local transaction should not be committed.
  • -
  • One transaction will retry acquiring the global lock several times if it fails, but only until a timeout; once the timeout is reached, it rolls back the local transaction and releases the local lock as well.
  • -
-

For example:

-

Two transactions tx1 and tx2 are trying to update field m of table a. The original value of m is 1000.

-

tx1 starts first: it begins a local transaction, acquires the local lock, and does the update: m = 1000 - 100 = 900. tx1 must acquire the global lock before committing the local transaction; after that, it commits the local transaction and releases the local lock.

-

Next, tx2 begins its local transaction, acquires the local lock, and does the update: m = 900 - 100 = 800. Before tx2 can commit its local transaction, it must acquire the global lock, which may still be held by tx1, so tx2 retries. After tx1 does the global commit and releases the global lock, tx2 can acquire it, commit its local transaction, and release the local lock.

-

Write-Isolation: Commit

-

As the figure above shows, tx1 does the global commit in phase 2 and releases the global lock, then tx2 acquires the global lock and commits its local transaction.

-

Write-Isolation: Rollback

-

As the figure above shows, if tx1 needs to do a global rollback, it must acquire the local lock to revert the phase-1 update.

-

However, the local lock is now held by tx2, which is waiting for the global lock, so tx1 fails to roll back; tx1 keeps retrying until tx2's attempt to acquire the global lock times out. tx2 then rolls back its local transaction and releases the local lock, after which tx1 can acquire the local lock and complete the branch rollback successfully.

-

Because the global lock is held by tx1 during the whole process, there is no dirty write problem.

-

Read isolation

-

On the premise that the isolation level of the local database is read committed or above, the default isolation level of the global transaction is read uncommitted.

-

If the application needs the isolation level of the global transaction to be read committed, Seata currently implements it via the SELECT FOR UPDATE statement.

-

Read Isolation: SELECT FOR UPDATE

-

The global lock is acquired during the execution of the SELECT FOR UPDATE statement; if the global lock is held by another transaction, the transaction releases the local lock and retries the SELECT FOR UPDATE. During the whole process the query is blocked until the global lock is acquired; once it is acquired, the other global transaction must have committed, so the global transaction reads committed data.

-

For performance reasons, Seata only proxies SELECT FOR UPDATE; general SELECT statements are left untouched.

-

Work process

-

Take an example to illustrate it.

-

A business table:product

- - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
-

The sql of branch transaction in AT mode:

-
update product set name = 'GTS' where name = 'TXC';
-
-

Phase 1

-

Process:

-
    -
  1. Parse the SQL: determine that the SQL type is UPDATE, the table name is product, the WHERE condition is name = 'TXC', and so on.
  2. -
  3. Query the data before the update (the "before image"): to locate the data that will be updated, generate a query statement from the WHERE condition above.
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

Got the "before image":

- - - - - - - - - - - - - - - -
idnamesince
1TXC2014
-
    -
  1. Execute the business SQL: update the record so that name equals 'GTS'.
  2. -
  3. Query the data after the update (the "after image"): locate the record by the primary key taken from the before image.
  4. -
-
select id, name, since from product where id = 1;
-
-

Got the after image:

- - - - - - - - - - - - - - - -
idnamesince
1GTS2014
-
    -
  1. Insert a rollback log: build the rollback log record from the before and after images plus the related SQL information, then insert it into the UNDO_LOG table.
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. Before the local commit, the transaction submits a request to the TC to acquire the global lock on the record whose primary key equals 1 in table product.
  2. -
  3. Commit the local transaction: the update of the product table and the insert into the UNDO_LOG table are committed in the same local transaction.
  4. -
  5. Report the result of step 7 to TC.
  6. -
-

Phase 2 - Rollback case

-
    -
  1. After receiving the rollback request from the TC, begin a local transaction and execute the following operations.
  2. -
  3. Retrieve the UNDO LOG record by XID and Branch ID.
  4. -
  5. Validate the data: compare the after image in the UNDO LOG with the current data; if they differ, the data has been changed by an operation outside the current transaction, and it must be handled according to the configured policy (described in detail in another document).
  6. -
  7. Generate the rollback SQL statement based on the before image in the UNDO LOG and the related information of the business SQL.
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. Commit the local transaction and report its execution result (the rollback result of the branch transaction) to the TC.
  2. -
-

Phase 2 - Commit case

-
    -
  1. After receiving the commit request from the TC, put the request into a work queue and return success to the TC immediately.
  2. -
  3. While the queued work is processed asynchronously, the UNDO LOG records are deleted in batches.
  4. -
-

Appendix

-

Undo log table

-

UNDO_LOG table: the data types differ slightly between databases.

-

For example, in MySQL:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
-
-- Note that 0.7.0+ adds the field context
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

TCC Mode

-

Recall the description in the overview: a distributed global transaction as a whole is a two-phase commit model. The global transaction is composed of several branch transactions, and each branch transaction must meet the requirements of the two-phase commit model, that is, each branch must provide its own:

-
    -
  • One-stage prepare behavior
  • -
  • Two-phase commit or rollback behavior
  • -
-

Overview of a global transaction

-

According to the two-phase behavior mode, we divide branch transactions into Automatic (Branch) Transaction Mode and TCC (Branch) Transaction Mode.

-

The AT mode (Reference Link TBD) is based on a relational database that supports local ACID transactions:

-
    -
  • One-stage prepare behavior: In local transactions, business data updates and corresponding rollback log records are submitted together.
  • -
  • Two-phase commit behavior: completes successfully immediately; the rollback log is cleaned up automatically and asynchronously.
  • -
  • Two-phase rollback behavior: compensation operations are generated automatically from the rollback log to complete the data rollback.
  • -
-

Correspondingly, the TCC mode does not rely on transaction support of the underlying data resources:

-
    -
  • One-stage prepare behavior: Call the custom prepare logic.
  • -
  • Two-phase commit behavior: Call custom commit logic.
  • -
  • Two-phase rollback behavior: Call the custom rollback logic.
  • -
-

In other words, the TCC mode means bringing customized branch transactions under the management of the global transaction.
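A minimal, hedged sketch of what such a custom branch might look like, assuming the TCC annotations from Seata's io.seata.rm.tcc.api package; the action name, parameter, and semantics (freezing inventory) are illustrative only:

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

public interface InventoryTccAction {

    // One-stage prepare behavior: reserve the resource (e.g. freeze the stock).
    @TwoPhaseBusinessAction(name = "inventoryTccAction", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext context,
                    @BusinessActionContextParameter(paramName = "count") int count);

    // Two-phase commit behavior: confirm the reservation; should be idempotent.
    boolean commit(BusinessActionContext context);

    // Two-phase rollback behavior: release the reservation; should be idempotent.
    boolean rollback(BusinessActionContext context);
}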

-

Saga Mode

-

The Saga mode is a long-transaction solution provided by Seata. In Saga mode, each participant in the business process commits its local transaction; when a participant fails, the previously successful participants are compensated. Both the phase-1 forward services and the phase-2 compensation services are implemented by the business developers.

-

Saga mode diagram

-

Theoretical basis: the paper Sagas published by Hector & Kenneth (1987).

-

Applicable scenarios:

-
    -
  • Business processes that are long or involve many steps
  • -
  • Participants that include services from other companies or legacy systems which cannot provide the three interfaces required by the TCC mode
  • -
-

Advantages:

-
    -
  • Commit local transactions in one phase, lock-free, high performance
  • -
  • Event-driven architecture, participants can execute asynchronously, high throughput
  • -
  • Compensation services are easy to implement
  • -
-

Disadvantages:

- -
- - - - - - - diff --git a/en-us/docs/overview/what-is-seata.json b/en-us/docs/overview/what-is-seata.json deleted file mode 100644 index 19477813..00000000 --- a/en-us/docs/overview/what-is-seata.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "what-is-seata.md", - "__html": "

What Is Seata?

\n

Seata is an open source distributed transaction solution dedicated to providing high performance and easy to use distributed transaction services. Seata will provide users with AT, TCC, SAGA, and XA transaction models to create a one-stop distributed solution for users.

\n

AT Mode

\n

Prerequisite

\n
    \n
  • Relational databases that support local ACID transaction.
  • \n
  • Java applications that access database via JDBC.
  • \n
\n

Overall mechanism

\n

Evolution from the two phases commit protocol:

\n
    \n
  • Phase 1:commit business data and rollback log in the same local transaction, then release local lock and connection resources.
  • \n
  • Phase 2:\n
      \n
    • for commit case, do the work asynchronously and quickly.
    • \n
    • for rollback case, do compensation, base on the rollback log created in the phase 1.
    • \n
    \n
  • \n
\n

Write isolation

\n
    \n
  • The global lock must be acquired before committing the local transaction of phase 1.
  • \n
  • If the global lock is not acquired, the local transaction should not be committed.
  • \n
  • One transaction will try to acquire the global lock many times if it fails to, but there is a timeout, if it's timeout, rollback local transaction and release local lock as well.
  • \n
\n

For example:

\n

Two transactions tx1 and tx2 are trying to update field m of table a. The original value of m is 1000.

\n

tx1 starts first, begins a local transaction, acquires the local lock, do the update operation: m = 1000 - 100 = 900. tx1 must acquire the global lock before committing the local transaction, after that, commit local transaction and release local lock.

\n

next, tx2 begins local transaction, acquires local lock, do the update operation: m = 900 - 100 = 800. Before tx2 can commit local transaction, it must acquire the global lock, but the global lock may be hold by tx1, so tx2 will do retry. After tx1 does the global commit and releases the global lock, tx2 can acquire the global lock, then it can commit local transaction and release local lock.

\n

\"Write-Isolation:

\n

See the figure above, tx1 does the global commit in phase 2 and release the global lock, tx2 acquires the global lock and commits local transaction.

\n

\"Write-Isolation:

\n

See the figure above, if tx1 wants to do the global rollback, it must acquire local lock to revert the update operation of phase 1.

\n

However, now the local lock is held by tx2 which hopes to acquire the global lock, so tx1 fails to rollback, but it would try it many times until it's timeout for tx2 to acquire the global lock, then tx2 rollbacks local transaction and releases local lock, after that, tx1 can acquire the local lock, and do the branch rollback successfully.

\n

Because the global lock is held by tx1 during the whole process, there isn't no problem of dirty write.

\n

Read isolation

\n

The isolation level of local database is read committed or above, so the default isolation level of the global transaction is read uncommitted.

\n

If it needs the isolation level of the global transaction is read committed, currently, Seata implements it via SELECT FOR UPDATE statement.

\n

\"Read

\n

The global lock is be applied during the execution of SELECT FOR UPDATE statement, if the global lock is held by other transactions, the transaction will release local lock retry execute the SELECT FOR UPDATE statement. During the whole process, the query is blocked until the global lock is acquired, if the lock is acquired, it means the other global transaction has committed, so the isolation level of global transaction is read committed.

\n

For the performance consideration, Seata only does proxy work for SELECT FOR UPDATE. For the general SELECT statement, do nothing.

\n

Work process

\n

Take an example to illustrate it.

\n

A business table:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

The sql of branch transaction in AT mode:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

Phase 1

\n

Process:

\n
    \n
  1. Parse sql: know the sql type is update operation, table name is product, the where condition is name = 'TXC' and so on.
  2. \n
  3. Query the data before update(Named before image): In order to locate the data that will be updated, generate a query statement by the where condition above.
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

Got the "before image":

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. Execute the update sql: update the record of name equals 'GTS'.
  2. \n
  3. Query the data after update(Named after image): locate the record by the primary key of image data before update.
  4. \n
\n
select id, name, since from product where id = 1;\n
\n

Got the after image:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. Insert a rollback log: build the rollback log with image before and after, as well as SQL statement relelated information, then insert into table UNDO_LOG .
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. Before local commit, the transaction submmit an application to TC to acquire a global lock for the record whose primary key equals 1 in the table product.
  2. \n
  3. Commit local transaction: commit the update of PRODUCT table and the insert of UNDO_LOG table in the same local transaction.
  4. \n
  5. Report the result of step 7 to TC.
  6. \n
\n

Phase 2 - Rollback case

\n
    \n
  1. After receive the rollback request from TC, begin a local transaction, execute operation as following.
  2. \n
  3. Retrieve the UNDO LOG by XID and Branch ID.
  4. \n
  5. Validate data: Compare the image data after update in UNDO LOG with current data, if there is difference, it means the data has been changed by operation out of current transaction, it should be handled in different policy, we will describe it detailedly in other document.
  6. \n
  7. Generate rollback SQL statement base on before image in UNDO LOG and related information of the business SQL.
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
    \n
  1. Commit local transaction, report the result of execution of local transaction(The rollback result of the Branch transaction) to TC.
  2. \n
\n

Phase 2 - Commit case

\n
    \n
  1. After receive the commit request from TC, put the request into a work queue, return success to TC immediately.
  2. \n
  3. During the phase of doing the asynchronous work in the queue, the UNDO LOGs are deleted in batch way.
  4. \n
\n

Appendix

\n

Undo log table

\n

UNDO_LOG Table:there is a little bit difference on the data type for different databases.

\n

For MySQL example:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
\n
-- Note that 0.7.0+ adds the field context\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n

TCC Mode

\n

Review the description in the overview: A distributed global transaction, the whole is a two-phase commit model. The global transaction is composed of several branch transactions. The branch transaction must meet the requirements of the two-phase commit model, that is, each branch transaction must have its own:

\n
    \n
  • One-stage prepare behavior
  • \n
  • Two-phase commit or rollback behavior
  • \n
\n

\"Overview

\n

According to the two-phase behavior mode, we divide branch transactions into Automatic (Branch) Transaction Mode and TCC (Branch) Transaction Mode.

\n

The AT mode (Reference Link TBD) is based on a relational database that supports local ACID transactions:

\n
    \n
  • One-stage prepare behavior: In local transactions, business data updates and corresponding rollback log records are submitted together.
  • \n
  • Two-phase commit behavior: Immediately completed successfully, automatically asynchronously clean up the rollback log.
  • \n
  • Two-phase rollback behavior: Through the rollback log, automatically generates compensation operations to complete data rollback.
  • \n
\n

Correspondingly, the TCC mode does not rely on transaction support of the underlying data resources:

\n
    \n
  • One-stage prepare behavior: Call the custom prepare logic.
  • \n
  • Two-phase commit behavior: Call custom commit logic.
  • \n
  • Two-phase rollback behavior: Call the custom rollback logic.
  • \n
\n

The so-called TCC mode refers to the support of customized's branch transactions into the management of global transactions.

\n

Saga Mode

\n

The Saga model is a long transaction solution provided by SEATA. In the Saga model, each participant in the business process submits a local transaction. When a participant fails, the previous successful participant is compensated. One stage is positive serving and The two-stage compensation services are implemented by business development.

\n

\"Saga

\n

Theoretical basis: Hector & Kenneth Post a comment Sagas (1987)

\n

Applicable scene:

\n
    \n
  • Long business processes, many business processes
  • \n
  • Participants include other company or legacy system services and cannot provide the three interfaces required by the TCC model
  • \n
\n

Advantage:

\n
    \n
  • Commit local transactions in one phase, lock-free, high performance
  • \n
  • Event-driven architecture, participants can execute asynchronously, high throughput
  • \n
  • Compensation services are easy to implement
  • \n
\n

Disadvantages:

\n\n", - "link": "/en-us/docs/overview/what-is-seata.html", - "meta": { - "title": "What Is Seata", - "keywords": "Seata", - "description": "Seata is an open source distributed transaction solution dedicated to providing high performance and easy to use distributed transaction services. Seata will provide users with AT, TCC, SAGA, and XA transaction models to create a one-stop distributed solution for users." - } -} \ No newline at end of file diff --git a/en-us/docs/overview/what_is_seata.html b/en-us/docs/overview/what_is_seata.html deleted file mode 100644 index f0596d5b..00000000 --- a/en-us/docs/overview/what_is_seata.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - what_is_seata - - - - -
Documentation

What Is Seata?

-
- - - - - - diff --git a/en-us/docs/overview/what_is_seata.json b/en-us/docs/overview/what_is_seata.json deleted file mode 100644 index 0c6287f3..00000000 --- a/en-us/docs/overview/what_is_seata.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "what_is_seata.md", - "__html": "

What Is Seata?

\n", - "link": "/en-us/docs/overview/what_is_seata.html", - "meta": {} -} \ No newline at end of file diff --git a/en-us/docs/user/api.html b/en-us/docs/user/api.html deleted file mode 100644 index d14eb9ef..00000000 --- a/en-us/docs/user/api.html +++ /dev/null @@ -1,247 +0,0 @@ - - - - - - - - - - Api Guide - - - - -
Documentation

1. Overview

-

The Seata API is divided into 2 categories: the High-Level API and the Low-Level API.

-
    -
  • High-Level API: used for defining and controlling transaction boundaries and for querying transaction status.
  • -
  • Low-Level API: used for controlling the propagation of the transaction context.
  • -
-

2. High-Level API

-

2.1 GlobalTransaction

-

The GlobalTransaction interface contains methods to begin, commit, and roll back a transaction, to get its status, and so on.

-
public interface GlobalTransaction {
-
-    /**
-     * Begin a global transaction(Use default transaction name and timeout)
-     */
-    void begin() throws TransactionException;
-
-    /**
-     * Begin a global transaction, and point out the timeout(use default transaction name)
-     */
-    void begin(int timeout) throws TransactionException;
-
-    /**
-     * Begin a global transaction, and point out the transaction name and timeout.
-     */
-    void begin(int timeout, String name) throws TransactionException;
-
-    /**
-     * Commit globally
-     */
-    void commit() throws TransactionException;
-
-    /**
-     * Rollback globally
-     */
-    void rollback() throws TransactionException;
-
-    /**
-     * Get the status of transaction
-     */
-    GlobalStatus getStatus() throws TransactionException;
-
-    /**
-     * Get the XID of transaction
-     */
-    String getXid();
-
-}
-
-
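As a quick illustration, here is a hedged sketch of driving a global transaction programmatically with the interface above, assuming the io.seata.tm.api package location; the instance is obtained from GlobalTransactionContext (described next), and doBusiness() is a placeholder for the real branch calls:

import io.seata.tm.api.GlobalTransaction;
import io.seata.tm.api.GlobalTransactionContext;

public class ManualGlobalTransactionDemo {

    public void transfer() throws Exception {
        // Get the current global transaction, or create a new one as the launcher.
        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
        tx.begin(60000, "transfer-tx");      // begin with an explicit timeout and name
        try {
            doBusiness();                    // placeholder for calls to branch services
        } catch (Exception e) {
            tx.rollback();                   // roll back globally on business failure
            throw e;
        }
        tx.commit();                         // commit globally
    }

    private void doBusiness() {
        // ... business logic
    }
}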

2.2 GlobalTransactionContext

-

A GlobalTransaction instance can be retrieved from GlobalTransactionContext:

-

-    /**
-     * Retrieve current global transaction instance, if it doesn't exist, create a new one.
-     */
-    public static GlobalTransaction getCurrentOrCreate() {
-        GlobalTransaction tx = getCurrent();
-        if (tx == null) {
-            return createNew();
-        }
-        return tx;
-    }
-
-    /**
-     * Reload the global transaction identified by XID; the reloaded instance is not allowed to begin a transaction.
-     * This API is usually used for centralized handling of failed transactions afterwards.
-     * For example, if a global commit times out, the subsequent centralized processing is: reload the instance, retrieve its status, then decide from that status whether to re-issue the global commit.
-     */
-    public static GlobalTransaction reload(String xid) throws TransactionException {
-        GlobalTransaction tx = new DefaultGlobalTransaction(xid, GlobalStatus.UnKnown, GlobalTransactionRole.Launcher) {
-            @Override
-            public void begin(int timeout, String name) throws TransactionException {
-                throw new IllegalStateException("Never BEGIN on a RELOADED GlobalTransaction. ");
-            }
-        };
-        return tx;
-    }
-
-

2.3 TransactionalTemplate

-

TransactionalTemplate: wraps a business service invocation into a distributed-transaction-aware service using the preceding GlobalTransaction and GlobalTransactionContext APIs.

-
public class TransactionalTemplate {
-
-    public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {
-
-        // 1. Get current global transaction instance or create a new one
-        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-
-        // 2. Begin the global transaction
-        try {
-            tx.begin(business.timeout(), business.name());
-
-        } catch (TransactionException txe) {
-            // 2.1 Fail to begin
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.BeginFailure);
-
-        }
-
-        Object rs = null;
-        try {
-            // 3. invoke service
-            rs = business.execute();
-
-        } catch (Throwable ex) {
-
-            // Exception from business service invoke
-            try {
-                // Rollback globally
-                tx.rollback();
-
-                // 3.1 Global rollback success, throw original business exception
-                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);
-
-            } catch (TransactionException txe) {
-                // 3.2 Global rollback failed
-                throw new TransactionalExecutor.ExecutionException(tx, txe,
-                    TransactionalExecutor.Code.RollbackFailure, ex);
-
-            }
-
-        }
-
-        // 4. Commit globally
-        try {
-            tx.commit();
-
-        } catch (TransactionException txe) {
-            // 4.1 Global commit failed
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.CommitFailure);
-
-        }
-        return rs;
-    }
-
-}
-
-

The exception thrown by the template method: ExecutionException

-
    class ExecutionException extends Exception {
-
-        // Transaction instance threw exception
-        private GlobalTransaction transaction;
-
-        // Exception code:
-        // BeginFailure(Fail to begin transaction)
-        // CommitFailure(Fail to commit globally)
-        // RollbackFailure(Fail to rollback globally)
-        // RollbackDone(Global rollback success)
-        private Code code;
-
-        // Original exception triggered by rollback
-        private Throwable originalException;
-
-

The outer calling logic catches this exception and acts on the exception code (a hedged handling sketch follows the list):

-
    -
  • BeginFailure (failed to begin the transaction): getCause() returns the framework exception from beginning the transaction; getOriginalException() is null.
  • -
  • CommitFailure (failed to commit globally): getCause() returns the framework exception from the global commit; getOriginalException() is null.
  • -
  • RollbackFailure (failed to roll back globally): getCause() returns the framework exception from the global rollback; getOriginalException() returns the original exception of the business invocation.
  • -
  • RollbackDone (global rollback succeeded): getCause() is null; getOriginalException() returns the original exception of the business invocation.
  • -
-
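A hedged sketch of that outer handling, assuming the io.seata.tm.api package location, a default TransactionalTemplate constructor, and conventional getters (getCode(), getOriginalException()) for the fields shown above:

import io.seata.tm.api.TransactionalExecutor;
import io.seata.tm.api.TransactionalTemplate;

public class BusinessFacade {

    private final TransactionalTemplate template = new TransactionalTemplate();

    public Object callInGlobalTx(TransactionalExecutor business) {
        try {
            return template.execute(business);
        } catch (TransactionalExecutor.ExecutionException e) {
            switch (e.getCode()) {
                case RollbackDone:
                    // Global rollback succeeded: surface the original business failure.
                    throw new RuntimeException(e.getOriginalException());
                default:
                    // BeginFailure / CommitFailure / RollbackFailure: framework-level
                    // failure that may need later centralized handling (see the reload API above).
                    throw new RuntimeException(e.getCause());
            }
        }
    }
}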

3. Low-Level API

-

3.1 RootContext

-

RootContext: it is responsible for maintaining the XID during the application's runtime.

-
    /**
-     * Get the global XID of the current running application
-     */
-    public static String getXID() {
-        return CONTEXT_HOLDER.get(KEY_XID);
-    }
-
-    /**
-     * Bind the global XID to the current application runtime
-     */
-    public static void bind(String xid) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("bind " + xid);
-        }
-        CONTEXT_HOLDER.put(KEY_XID, xid);
-    }
-
-    /**
-     * Unbind the global XID from the current application runtime, and return XID
-     */
-    public static String unbind() {
-        String xid = CONTEXT_HOLDER.remove(KEY_XID);
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("unbind " + xid);
-        }
-        return xid;
-    }
-
-    /**
-     * Check if the current application runtime is in the global transaction context
-     */
-    public static boolean inGlobalTransaction() {
-        return CONTEXT_HOLDER.get(KEY_XID) != null;
-    }
-
-

The implementation of High-Level API is based on maintaining XID in the RootContext.

-

To tell whether the current operation is running inside a global transaction context, just check whether there is an XID in the RootContext.

-

The default implementation of RootContext is based on ThreadLocal, i.e. the XID is kept in the context of the current thread.

-

Two classic scenarios for the Low-Level API:

-

1. Propagating the transaction context across a remote invocation

-

Retrieve the current XID before the remote invocation:

-
String xid = RootContext.getXID();
-
-

Propagate the XID to the service provider through the RPC mechanism, and bind it to the provider's RootContext before executing the provider's business logic.

-
RootContext.bind(rpcXid);
-
-

2. Suspending and resuming a transaction

-

In a global transaction, if some business logic should not be in the scope of the global transaction, unbind the XID before invoking it.

-
String unbindXid = RootContext.unbind();
-
-

Rebind the XID after that business logic has executed, so that the global transaction is resumed.

-
RootContext.bind(unbindXid);
-
-
- - - - - - - diff --git a/en-us/docs/user/api.json b/en-us/docs/user/api.json deleted file mode 100644 index 8f60bcaa..00000000 --- a/en-us/docs/user/api.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "api.md", - "__html": "

1. Overview

\n

Seata API is divided into 2 categories: High-Level API and Low-Level API

\n
    \n
  • High-Level API : Used for defining and controlling transaction boundary, and querying transaction status.
  • \n
  • Low-Level API : Used for controlling the propagation of transaction context.
  • \n
\n

2. High-Level API

\n

2.1 GlobalTransaction

\n

GlobalTransaction class contains methods about begin transaction, commit transaction, rollback transaction and get status of transaction and so on.

\n
public interface GlobalTransaction {\n\n    /**\n     * Begin a global transaction(Use default transaction name and timeout)\n     */\n    void begin() throws TransactionException;\n\n    /**\n     * Begin a global transaction, and point out the timeout(use default transaction name)\n     */\n    void begin(int timeout) throws TransactionException;\n\n    /**\n     * Begin a global transaction, and point out the transaction name and timeout.\n     */\n    void begin(int timeout, String name) throws TransactionException;\n\n    /**\n     * Commit globally\n     */\n    void commit() throws TransactionException;\n\n    /**\n     * Rollback globally\n     */\n    void rollback() throws TransactionException;\n\n    /**\n     * Get the status of transaction\n     */\n    GlobalStatus getStatus() throws TransactionException;\n\n    /**\n     * Get the XID of transaction\n     */\n    String getXid();\n\n}\n
\n

2.2 GlobalTransactionContext

\n

GlobalTransaction instance can be retrieved from GlobalTransactionContext:

\n
\n    /**\n     * Retrieve current global transaction instance, if it doesn't exist, create a new one.\n     */\n    public static GlobalTransaction getCurrentOrCreate() {\n        GlobalTransaction tx = getCurrent();\n        if (tx == null) {\n            return createNew();\n        }\n        return tx;\n    }\n\n    /**\n     * Reload the global transaction identified by XID, the instance aren't allowed to begin transaction.\n     * This API is usually used for centralized handling of failed transaction later.\n     * For example, if it's time out to commit globally, the subsequent centralized processing steps are like this: reload the instance, from which retrieve the status, then recommit the transaction globally or not depends on the status value.\n     */\n    public static GlobalTransaction reload(String xid) throws TransactionException {\n        GlobalTransaction tx = new DefaultGlobalTransaction(xid, GlobalStatus.UnKnown, GlobalTransactionRole.Launcher) {\n            @Override\n            public void begin(int timeout, String name) throws TransactionException {\n                throw new IllegalStateException(\"Never BEGIN on a RELOADED GlobalTransaction. \");\n            }\n        };\n        return tx;\n    }\n
\n

2.3 TransactionalTemplate

\n

TransactionalTemplate: Wrap a business service invoke into a distributed transaction supported service with preceding GlobalTransaction and GlobalTransactionContext API.

\n
public class TransactionalTemplate {\n\n    public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {\n\n        // 1. Get current global transaction instance or create a new one\n        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\n\n        // 2. Begin the global transaction\n        try {\n            tx.begin(business.timeout(), business.name());\n\n        } catch (TransactionException txe) {\n            // 2.1 Fail to begin\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.BeginFailure);\n\n        }\n\n        Object rs = null;\n        try {\n            // 3. invoke service\n            rs = business.execute();\n\n        } catch (Throwable ex) {\n\n            // Exception from business service invoke\n            try {\n                // Rollback globally\n                tx.rollback();\n\n                // 3.1 Global rollback success, throw original business exception\n                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);\n\n            } catch (TransactionException txe) {\n                // 3.2 Global rollback failed\n                throw new TransactionalExecutor.ExecutionException(tx, txe,\n                    TransactionalExecutor.Code.RollbackFailure, ex);\n\n            }\n\n        }\n\n        // 4. Commit globally\n        try {\n            tx.commit();\n\n        } catch (TransactionException txe) {\n            // 4.1 Global commit failed\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.CommitFailure);\n\n        }\n        return rs;\n    }\n\n}\n
\n

The exception of template method: ExecutionException

\n
    class ExecutionException extends Exception {\n\n        // Transaction instance threw exception\n        private GlobalTransaction transaction;\n\n        // Exception code:\n        // BeginFailure(Fail to begin transaction)\n        // CommitFailure(Fail to commit globally)\n        // RollbackFailure(Fail to rollback globally)\n        // RollbackDone(Global rollback success)\n        private Code code;\n\n        // Original exception triggered by rollback\n        private Throwable originalException;\n
\n

Outer calling logic try-catch the exception, and do something based on the exception code:

\n
    \n
  • BeginFailure (Fail to begin transaction): getCause() gets the framework exception of begin transaction, getOriginalException() is null.
  • \n
  • CommitFailure(Fail to commit globally): getCause() gets the framework exception of commit transaction, getOriginalException() is null.
  • \n
  • RollbackFailure (Fail to rollback globally):getCause() gets the framework exception of rollback transaction,getOriginalException() gets the original exception of business invoke.
  • \n
  • RollbackDone(Global rollback success): getCause() is null, getOriginalException() gets the original exception of business invoke.
  • \n
\n

3. Low-Level API

\n

3.1 RootContext

\n

RootContext: It's responsible for maintaining XID during runtime of application.

\n
    /**\n     * Get the global XID of the current running application\n     */\n    public static String getXID() {\n        return CONTEXT_HOLDER.get(KEY_XID);\n    }\n\n    /**\n     * Bind the global XID to the current application runtime\n     */\n    public static void bind(String xid) {\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"bind \" + xid);\n        }\n        CONTEXT_HOLDER.put(KEY_XID, xid);\n    }\n\n    /**\n     * Unbind the global XID from the current application runtime, and return XID\n     */\n    public static String unbind() {\n        String xid = CONTEXT_HOLDER.remove(KEY_XID);\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"unbind \" + xid);\n        }\n        return xid;\n    }\n\n    /**\n     * Check if the current application runtime is in the global transaction context\n     */\n    public static boolean inGlobalTransaction() {\n        return CONTEXT_HOLDER.get(KEY_XID) != null;\n    }\n
\n

The implementation of High-Level API is based on maintaining XID in the RootContext.

\n

Whether or not the operation of the current running application is in a global transaction context, just check if there is an XID in the RootContext.

\n

The default implementation of RootContext is based on ThreadLocal, which is the XID is in the context of current thread.

\n

Two classic scenes of Low-Level API :

\n

1. The propagation of transaction context by remote invoke

\n

Retrieve current XID by remote invoke:

\n
String xid = RootContext.getXID();\n
\n

Propagating the XID to the provider of service by RPC, bind the XID to current RootContext before executing the business logic of provider.

\n
RootContext.bind(rpcXid);\n
\n

2. Pause and recover of transaction

\n

In a global transaction, if some business logic shouldn't be in the scope of the global transaction, unbind XID before invoke it.

\n
String unbindXid = RootContext.unbind();\n
\n

Rebind the XID back after the execution of related business logic to achieve recovering the global transaction.

\n
RootContext.bind(unbindXid);\n
\n", - "link": "/en-us/docs/user/api.html", - "meta": { - "title": "Api Guide", - "keywords": "Seata", - "description": "Api Guide." - } -} \ No newline at end of file diff --git a/en-us/docs/user/microservice.html b/en-us/docs/user/microservice.html deleted file mode 100644 index a757f493..00000000 --- a/en-us/docs/user/microservice.html +++ /dev/null @@ -1,148 +0,0 @@ - - - - - - - - - - Microservice Framework Guide - - - - -
Documentation

Transaction Context

-

Transaction context of Seata is managed by RootContext.

-

When the application begins a global transaction, RootContext automatically binds the XID of that transaction; at the end of the transaction (commit or rollback), RootContext automatically unbinds the XID.

-
// Bind XID
-RootContext.bind(xid);
-
-// Unbind XID
-String xid = RootContext.unbind();
-
-

The application retrieves the global transaction XID through the RootContext API.

-
// Retrieve XID
-String xid = RootContext.getXID();
-
-

To determine whether the application is running inside a global transaction, simply check whether an XID is bound to RootContext.

-
    public static boolean inGlobalTransaction() {
-        return CONTEXT_HOLDER.get(KEY_XID) != null;
-    }
-
-

Transaction propagation

-

The core mechanism of Seata's global transaction is the propagation of the transaction context; essentially, it is about how the XID is propagated at runtime.

-

1. Transaction propagation within a service

-

By default, RootContext is based on ThreadLocal; that is, the XID is bound to the context of the current thread.

-
public class ThreadLocalContextCore implements ContextCore {
-
-    private ThreadLocal<Map<String, String>> threadLocal = new ThreadLocal<Map<String, String>>() {
-        @Override
-        protected Map<String, String> initialValue() {
-            return new HashMap<String, String>();
-        }
-
-    };
-
-    @Override
-    public String put(String key, String value) {
-        return threadLocal.get().put(key, value);
-    }
-
-    @Override
-    public String get(String key) {
-        return threadLocal.get().get(key);
-    }
-
-    @Override
-    public String remove(String key) {
-        return threadLocal.get().remove(key);
-    }
-}
-
-

So within a service the XID naturally travels with the same thread, and by default nothing extra needs to be done to propagate the transaction.

-

If the transaction context needs to be suspended, do it through the RootContext API:

-
// Suspend (pause)
-String xid = RootContext.unbind();
-
-// TODO: Logic running out of the global transaction scope
-
-// Resume the global transaction
-RootContext.bind(xid);
-
-
-

2. Transactional propagation across service calls

-

From the basic idea described above, it follows that:

-
-

Transaction propagation across service calls essentially means carrying the XID along with the service call to the provider and binding it to the provider's RootContext.

-
-

As long as this can be done, Seata can in theory support any microservice framework.

-
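For example, with a plain HTTP-based framework the same pattern could look roughly like the sketch below: the consumer sends RootContext.getXID() as a request header keyed by RootContext.KEY_XID, and a provider-side servlet filter binds and unbinds it around the business call. The filter itself is an illustrative assumption, not a Seata-provided class:
-public class XidPropagationFilter implements javax.servlet.Filter {
-
-    // init() and destroy() omitted for brevity (Servlet 4.0+ provides defaults)
-    @Override
-    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
-            throws IOException, ServletException {
-        String rpcXid = ((HttpServletRequest) req).getHeader(RootContext.KEY_XID); // XID sent by the consumer
-        boolean bind = false;
-        if (!RootContext.inGlobalTransaction() && rpcXid != null) {
-            RootContext.bind(rpcXid); // join the caller's global transaction
-            bind = true;
-        }
-        try {
-            chain.doFilter(req, res); // business processing
-        } finally {
-            if (bind) {
-                RootContext.unbind(); // clean up so the worker thread can be reused safely
-            }
-        }
-    }
-}
-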

Interpretation of supporting Dubbo

-

Let's walk through the built-in support for Dubbo RPC to illustrate how Seata supports a specific microservice framework:

-

We use Dubbo's org.apache.dubbo.rpc.Filter extension to propagate the transaction context.

-
/**
- * The type Transaction propagation filter.
- */
-@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)
-public class TransactionPropagationFilter implements Filter {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);
-
-    @Override
-    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
-        String xid = RootContext.getXID(); // Get XID of current transaction
-        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID); // Acquire the XID from RPC invoke
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("xid in RootContext[" + xid + "] xid in RpcContext[" + rpcXid + "]");
-        }
-        boolean bind = false;
-        if (xid != null) { // Consumer:Put XID into the attachment of RPC
-            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);
-        } else {
-            if (rpcXid != null) { // Provider:Bind the XID propagated by RPC to current runtime
-                RootContext.bind(rpcXid);
-                bind = true;
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("bind[" + rpcXid + "] to RootContext");
-                }
-            }
-        }
-        try {
-            return invoker.invoke(invocation); // Business method invoke
-
-        } finally {
-            if (bind) { // Provider:Clean up XID after invoke
-                String unbindXid = RootContext.unbind();
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("unbind[" + unbindXid + "] from RootContext");
-                }
-                if (!rpcXid.equalsIgnoreCase(unbindXid)) {
-                    LOGGER.warn("xid in change during RPC from " + rpcXid + " to " + unbindXid);
-                    if (unbindXid != null) { // if there is new transaction begin, can't do clean up
-                        RootContext.bind(unbindXid);
-                        LOGGER.warn("bind [" + unbindXid + "] back to RootContext");
-                    }
-                }
-            }
-        }
-    }
-}
-
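One practical note: for Dubbo's extension loader to pick up this filter, it also has to be declared in a Dubbo SPI descriptor on the classpath. In Seata's dubbo integration the descriptor looks roughly like the following (the exact key and class path may differ by version and are shown for illustration):
-# META-INF/dubbo/org.apache.dubbo.rpc.Filter
-transactionPropagationFilter=io.seata.integration.dubbo.TransactionPropagationFilter
-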
-
- - - - - - - diff --git a/en-us/docs/user/microservice.json b/en-us/docs/user/microservice.json deleted file mode 100644 index 63cd3844..00000000 --- a/en-us/docs/user/microservice.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "microservice.md", - "__html": "

Transaction Context

\n

Transaction context of Seata is managed by RootContext.

\n

When the application begins a global transaction, RootContext automatically binds the XID of that transaction; at the end of the transaction (commit or rollback), RootContext automatically unbinds the XID.

\n
// Bind XID\nRootContext.bind(xid);\n\n// Unbind XID\nString xid = RootContext.unbind();\n
\n

The application retrieves the global transaction XID through the RootContext API.

\n
// Retrieve XID\nString xid = RootContext.getXID();\n
\n

To determine whether the application is running inside a global transaction, simply check whether an XID is bound to RootContext.

\n
    public static boolean inGlobalTransaction() {\n        return CONTEXT_HOLDER.get(KEY_XID) != null;\n    }\n
\n

Transaction propagation

\n

The core mechanism of Seata's global transaction is the propagation of the transaction context; essentially, it is about how the XID is propagated at runtime.

\n

1. Transaction propagation within a service

\n

By default, RootContext is based on ThreadLocal; that is, the XID is bound to the context of the current thread.

\n
public class ThreadLocalContextCore implements ContextCore {\n\n    private ThreadLocal<Map<String, String>> threadLocal = new ThreadLocal<Map<String, String>>() {\n        @Override\n        protected Map<String, String> initialValue() {\n            return new HashMap<String, String>();\n        }\n\n    };\n\n    @Override\n    public String put(String key, String value) {\n        return threadLocal.get().put(key, value);\n    }\n\n    @Override\n    public String get(String key) {\n        return threadLocal.get().get(key);\n    }\n\n    @Override\n    public String remove(String key) {\n        return threadLocal.get().remove(key);\n    }\n}\n
\n

So within a service the XID naturally travels with the same thread, and by default nothing extra needs to be done to propagate the transaction.

\n

If the transaction context needs to be suspended, do it through the RootContext API:

\n
// Suspend (pause)\nString xid = RootContext.unbind();\n\n// TODO: Logic running out of the global transaction scope\n\n// Resume the global transaction\nRootContext.bind(xid);\n\n
\n

2. Transactional propagation across service calls

\n

From the basic idea described above, it follows that:

\n
\n

Transaction propagation across service calls essentially means carrying the XID along with the service call to the provider and binding it to the provider's RootContext.

\n
\n

As long as this can be done, Seata can in theory support any microservice framework.

\n

Interpretation of supporting Dubbo

\n

Let's walk through the built-in support for Dubbo RPC to illustrate how Seata supports a specific microservice framework:

\n

We use Dubbo's org.apache.dubbo.rpc.Filter extension to propagate the transaction context.

\n
/**\n * The type Transaction propagation filter.\n */\n@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)\npublic class TransactionPropagationFilter implements Filter {\n\n    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);\n\n    @Override\n    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {\n        String xid = RootContext.getXID(); // Get XID of current transaction\n        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID); // Acquire the XID from RPC invoke\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"xid in RootContext[\" + xid + \"] xid in RpcContext[\" + rpcXid + \"]\");\n        }\n        boolean bind = false;\n        if (xid != null) { // Consumer:Put XID into the attachment of RPC\n            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);\n        } else {\n            if (rpcXid != null) { // Provider:Bind the XID propagated by RPC to current runtime\n                RootContext.bind(rpcXid);\n                bind = true;\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"bind[\" + rpcXid + \"] to RootContext\");\n                }\n            }\n        }\n        try {\n            return invoker.invoke(invocation); // Business method invoke\n\n        } finally {\n            if (bind) { // Provider:Clean up XID after invoke\n                String unbindXid = RootContext.unbind();\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"unbind[\" + unbindXid + \"] from RootContext\");\n                }\n                if (!rpcXid.equalsIgnoreCase(unbindXid)) {\n                    LOGGER.warn(\"xid in change during RPC from \" + rpcXid + \" to \" + unbindXid);\n                    if (unbindXid != null) { // if there is new transaction begin, can't do clean up\n                        RootContext.bind(unbindXid);\n                        LOGGER.warn(\"bind [\" + unbindXid + \"] back to RootContext\");\n                    }\n                }\n            }\n        }\n    }\n}\n
\n", - "link": "/en-us/docs/user/microservice.html", - "meta": { - "title": "Microservice Framework Guide", - "keywords": "Seata", - "description": "Microservice Framework Guide." - } -} \ No newline at end of file diff --git a/en-us/docs/user/quickstart.html b/en-us/docs/user/quickstart.html deleted file mode 100644 index 324de7bc..00000000 --- a/en-us/docs/user/quickstart.html +++ /dev/null @@ -1,212 +0,0 @@ - - - - - - - - - - Quick Start - - - - -
Documentation

Quick Start

-

Let's begin with a Microservices example.

-

Use case

-

The business scenario is a user purchasing commodities. The whole business logic is powered by 3 microservices:

-
    -
  • Storage service: deduct storage count on given commodity.
  • -
  • Order service: create order according to purchase request.
  • -
  • Account service: debit the balance of user's account.
  • -
-

Architecture

-

Architecture

-

StorageService

-
public interface StorageService {
-
-    /**
-     * deduct storage count
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

OrderService

-
public interface OrderService {
-
-    /**
-     * create order
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

AccountService

-
public interface AccountService {
-
-    /**
-     * debit balance of user's account
-     */
-    void debit(String userId, int money);
-}
-
-

Main business logic

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * purchase
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        // INSERT INTO orders ...
-        return orderDAO.insert(order);
-    }
-}
-
-

Distributed Transaction Solution with SEATA

-

-

We just need an annotation @GlobalTransactional on business method:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-
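In the Dubbo sample the annotation is usually given a timeout and a transaction name as well; a sketch following the BusinessServiceImpl shown earlier (the attribute values are illustrative):
-    @GlobalTransactional(timeoutMills = 300000, name = "dubbo-demo-tx")
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        storageService.deduct(commodityCode, orderCount);
-        orderService.create(userId, commodityCode, orderCount);
-    }
-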

Example powered by Dubbo + SEATA

-

Step 1: Setup database

-
    -
  • Requirement: MySQL with InnoDB engine.
  • -
-

Note: Strictly speaking, there should be 3 databases for the 3 services in the example use case. For simplicity, however, we can create just one database and configure 3 data sources against it.

-

Update the following Spring XML files with the URL/username/password of the database you just created.

-

dubbo-account-service.xml -dubbo-order-service.xml -dubbo-storage-service.xml

-
        <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-        <property name="username" value="xxx" />
-        <property name="password" value="xxx" />
-
-

Step 2: Create UNDO_LOG table

-

UNDO_LOG table is required by SEATA AT mode.

-
-- Note: since 0.3.0+, the unique index ux_undo_log is added
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

Step 3: Create tables for example business

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

Step 4: Start Server

- -
Usage: sh seata-server.sh(for linux and mac) or cmd seata-server.bat(for windows) [options]
-  Options:
-    --host, -h
-      The host to bind.
-      Default: 0.0.0.0
-    --port, -p
-      The port to listen.
-      Default: 8091
-    --storeMode, -m
-      log store mode : file、db
-      Default: file
-    --help
-
-e.g.
-
-sh seata-server.sh -p 8091 -h 127.0.0.1 -m file
-
-

Step 5: Run example

-

Go to samples repo: seata-samples

-
    -
  • Start DubboAccountServiceStarter
  • -
  • Start DubboStorageServiceStarter
  • -
  • Start DubboOrderServiceStarter
  • -
  • Run DubboBusinessTester for demo test
  • -
-

TBD: scripts for running the demo applications

-
- - - - - - - diff --git a/en-us/docs/user/quickstart.json b/en-us/docs/user/quickstart.json deleted file mode 100644 index 3eff67b2..00000000 --- a/en-us/docs/user/quickstart.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "quickstart.md", - "__html": "

Quick Start

\n

Let's begin with a Microservices example.

\n

Use case

\n

The business scenario is a user purchasing commodities. The whole business logic is powered by 3 microservices:

\n
    \n
  • Storage service: deduct storage count on given commodity.
  • \n
  • Order service: create order according to purchase request.
  • \n
  • Account service: debit the balance of user's account.
  • \n
\n

Architecture

\n

\"Architecture\"

\n

StorageService

\n
public interface StorageService {\n\n    /**\n     * deduct storage count\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

OrderService

\n
public interface OrderService {\n\n    /**\n     * create order\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

AccountService

\n
public interface AccountService {\n\n    /**\n     * debit balance of user's account\n     */\n    void debit(String userId, int money);\n}\n
\n

Main business logic

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * purchase\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        // INSERT INTO orders ...\n        return orderDAO.insert(order);\n    }\n}\n
\n

Distributed Transaction Solution with SEATA

\n

\"\"

\n

We just need an annotation @GlobalTransactional on business method:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

Example powered by Dubbo + SEATA

\n

Step 1: Setup database

\n
    \n
  • Requirement: MySQL with InnoDB engine.
  • \n
\n

Note: Strictly speaking, there should be 3 databases for the 3 services in the example use case. For simplicity, however, we can create just one database and configure 3 data sources against it.

\n

Update the following Spring XML files with the URL/username/password of the database you just created.

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
        <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n        <property name=\"username\" value=\"xxx\" />\n        <property name=\"password\" value=\"xxx\" />\n
\n

Step 2: Create UNDO_LOG table

\n

UNDO_LOG table is required by SEATA AT mode.

\n
-- Note: since 0.3.0+, the unique index ux_undo_log is added\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n

Step 3: Create tables for example business

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

Step 4: Start Server

\n\n
Usage: sh seata-server.sh(for linux and mac) or cmd seata-server.bat(for windows) [options]\n  Options:\n    --host, -h\n      The host to bind.\n      Default: 0.0.0.0\n    --port, -p\n      The port to listen.\n      Default: 8091\n    --storeMode, -m\n      log store mode : file、db\n      Default: file\n    --help\n\ne.g.\n\nsh seata-server.sh -p 8091 -h 127.0.0.1 -m file\n
\n

Step 5: Run example

\n

Go to samples repo: seata-samples

\n
    \n
  • Start DubboAccountServiceStarter
  • \n
  • Start DubboStorageServiceStarter
  • \n
  • Start DubboOrderServiceStarter
  • \n
  • Run DubboBusinessTester for demo test
  • \n
\n

TBD: scripts for running the demo applications

\n", - "link": "/en-us/docs/user/quickstart.html", - "meta": { - "title": "Quick Start", - "keywords": "Seata", - "description": "Let's begin with a Microservices example." - } -} \ No newline at end of file diff --git a/en-us/index.html b/en-us/index.html deleted file mode 100644 index ceffa479..00000000 --- a/en-us/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - Seata - - - - -

Seata

Seata is an open source distributed transaction solution that delivers high performance and easy to use distributed transaction services under a microservices architecture.

What is Seata?

Seata is an open source distributed transaction solution that delivers high-performance and easy-to-use distributed transaction services under a microservices architecture. Before it was open-sourced, the internal version of Seata served as the distributed consistency middleware within the Alibaba economy, helping it get through the Double 11 shopping festivals of past years smoothly and providing strong support for businesses across departments. After years of accumulation, its commercial versions have been offered on Alibaba Cloud and Financial Cloud. In January 2019, in order to build a more complete technology ecosystem and share these technical achievements more broadly, Seata was officially open-sourced. Going forward, Seata will keep improving the reliability and completeness of its technology through community building.

Feature List

  • Microservices Framework Support

    RPC frameworks such as Dubbo, Spring Cloud, Sofa-RPC, Motan, and gRPC are currently supported, and more frameworks are being integrated continuously.

  • AT mode

    Provides a non-intrusive, automatically compensating transaction mode. The AT mode currently supports MySQL and Oracle; support for PostgreSQL and H2 is under development.

  • TCC mode

    Supports the TCC mode, which can be mixed with the AT mode for greater flexibility.

  • SAGA mode

    Provides an effective solution for long-running transactions.

  • XA mode (under development)

    Supports the XA mode for databases that implement the XA interface.

  • High availability

    Supports a cluster mode based on database storage, with strong horizontal scalability.

- - - - - - - diff --git a/index.html b/index.html deleted file mode 100644 index 9599256c..00000000 --- a/index.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/md_json/blog.json b/md_json/blog.json deleted file mode 100644 index 2ad6692c..00000000 --- a/md_json/blog.json +++ /dev/null @@ -1,258 +0,0 @@ -{ - "en-us": [ - { - "filename": "download.md", - "link": "/en-us/blog/download.html", - "meta": { - "title": "Downloads", - "keywords": "Seata, Downloads, Version", - "description": "This article will introduce you how to understand the details of each version and upgrade matters needing attention." - } - }, - { - "filename": "manual-transaction-mode.md", - "link": "/en-us/blog/manual-transaction-mode.html", - "meta": { - "title": "MT mode", - "keywords": "MT mode", - "description": "introduce MT mode", - "author": "kmmshmily", - "date": "2019-02-13" - } - }, - { - "filename": "quick-start-use-seata-and-dubbo-services.md", - "link": "/en-us/blog/quick-start-use-seata-and-dubbo-services.html", - "meta": { - "title": "How to use Seata to ensure consistency between Dubbo Microservices", - "keywords": "Dubbo,Seata,Consistency", - "description": "This article will introduce you how to use Seata to ensure consistency between Dubbo Microservices.", - "author": "slievrly", - "date": "2019-03-07" - } - } - ], - "zh-cn": [ - { - "filename": "design-more-flexable-application-by-saga.md", - "link": "/zh-cn/blog/design-more-flexable-application-by-saga.html", - "meta": { - "title": "基于 Seata Saga 设计更有弹性的金融应用", - "keywords": "Saga,Seata,一致性,金融,弹性,分布式,事务", - "description": "本文从金融分布式应用开发的一些痛点出发,结合理论和实践对社区和行业的解决方案进行了分析,并讲解了如何基于Seata saga设计更有弹性的金融应用", - "author": "long187", - "date": "2019-11-04" - } - }, - { - "filename": "download.md", - "link": "/zh-cn/blog/download.html", - "meta": { - "title": "下载中心", - "keywords": "Seata, Downloads, Version", - "description": "本文将向你介绍如何点击了解各版本详情和升级注意事项。" - } - }, - { - "filename": "how-to-support-spring-cloud.md", - "link": "/zh-cn/blog/how-to-support-spring-cloud.html", - "meta": { - "title": "Fescar 与 Spring Cloud 集成源码深度剖析", - "author": "郭树抗 季敏", - "date": "2019/04/15", - "keywords": "fescar、seata、分布式事务" - } - }, - { - "filename": "integrate-seata-with-spring-cloud.md", - "link": "/zh-cn/blog/integrate-seata-with-spring-cloud.html", - "meta": { - "title": "Seata(Fescar)分布式事务 整合 Spring Cloud", - "author": "大菲.Fei", - "date": "2019/04/15", - "keywords": "fescar、seata、分布式事务" - } - }, - { - "filename": "manual-transaction-mode.md", - "link": "/zh-cn/blog/manual-transaction-mode.html", - "meta": { - "title": "MT 模式", - "keywords": "MT 模式", - "description": "介绍 MT 模式", - "author": "kmmshmily", - "date": "2019-02-13" - } - }, - { - "filename": "quick-start-use-seata-and-dubbo-services.md", - "link": "/zh-cn/blog/quick-start-use-seata-and-dubbo-services.html", - "meta": { - "title": "如何使用Seata保证Dubbo微服务间的一致性", - "keywords": "Dubbo,Seata,一致性", - "description": "本文主要介绍如何使用Seata保证Dubbo微服务间的一致性", - "author": "slievrly", - "date": "2019-03-07" - } - }, - { - "filename": "seata-analysis-go-server.md", - "link": "/zh-cn/blog/seata-analysis-go-server.html", - "meta": { - "title": "Seata分布式Go Server正式开源-TaaS设计简介", - "author": "fagongzi(zhangxu19830126@gmail.com)", - "date": "2019/04/23", - "keywords": "seata、分布式事务、高可用" - } - }, - { - "filename": "seata-analysis-java-client.md", - "link": "/zh-cn/blog/seata-analysis-java-client.html", - "meta": { - "title": "分布式事务之Seata-Client原理及流程详解", - "author": "fangliangsheng", - "date": 
"2019/04/15", - "keywords": "fescar、seata、分布式事务" - } - }, - { - "filename": "seata-analysis-java-server.md", - "link": "/zh-cn/blog/seata-analysis-java-server.html", - "meta": { - "title": "深度剖析一站式分布式事务方案Seata-Server", - "author": "李钊,季敏", - "date": "2019/04/08", - "keywords": "fescar、seata、分布式事务" - } - }, - { - "filename": "seata-analysis-simple.md", - "link": "/zh-cn/blog/seata-analysis-simple.html", - "meta": { - "title": "Fescar分布式事务原理解析探秘", - "author": "陈凯玲", - "keywords": "Fescar、分布式事务", - "date": "2019/02/18" - } - }, - { - "filename": "seata-at-mode-design.md", - "link": "/zh-cn/blog/seata-at-mode-design.html", - "meta": { - "title": "分布式事务中间件 Seata 的设计原理", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式", - "description": "AT 模式设计原理", - "date": "2019/07/11" - } - }, - { - "filename": "seata-at-mode-start-rm-tm.md", - "link": "/zh-cn/blog/seata-at-mode-start-rm-tm.html", - "meta": { - "title": "Seata 客户端需要同时启动 RM 和 TM 吗?", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式、RM、TM", - "description": "关于 Seata 后续优化的一个讨论点", - "date": "2019/11/28" - } - }, - { - "filename": "seata-at-mode-start.md", - "link": "/zh-cn/blog/seata-at-mode-start.html", - "meta": { - "title": "Seata AT 模式启动源码分析", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式", - "description": "Seata 源码分析系列", - "date": "2019/11/27" - } - }, - { - "filename": "seata-at-tcc-saga.md", - "link": "/zh-cn/blog/seata-at-tcc-saga.html", - "meta": { - "title": "分布式事务 Seata 及其三种模式详解", - "keywords": "Saga,Seata,AT,TCC,一致性,金融,分布式,事务", - "description": "着重分享分布式事务产生的背景、理论基础,以及 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现", - "author": "long187", - "date": "2019-08-11" - } - }, - { - "filename": "seata-config-center.md", - "link": "/zh-cn/blog/seata-config-center.html", - "meta": { - "title": "Seata 配置中心实现原理", - "author": "张乘辉", - "keywords": "Seata、Config", - "description": "Seata 可以支持多个第三方配置中心,那么 Seata 是如何同时兼容那么多个配置中心的呢?", - "date": "2019/12/12" - } - }, - { - "filename": "seata-mybatisplus-analysis.md", - "link": "/zh-cn/blog/seata-mybatisplus-analysis.html", - "meta": { - "title": "透过源码解决SeataAT模式整合Mybatis-Plus失去MP特性的问题", - "keywords": "Seata,Mybatis-Plus,分布式事务", - "description": "本文讲述如何透过源码解决Seata整合Mybatis-Plus失去MP特性的问题", - "author": "FUNKYE", - "date": "2019/11/30" - } - }, - { - "filename": "seata-nacos-analysis.md", - "link": "/zh-cn/blog/seata-nacos-analysis.html", - "meta": { - "title": "Seata分布式事务启用Nacos做配置中心", - "keywords": "Seata,Nacos,分布式事务", - "description": "本文讲述如何使用Seata整合Nacos配置", - "author": "FUNKYE", - "date": "2019/12/02" - } - }, - { - "filename": "seata-nacos-docker.md", - "link": "/zh-cn/blog/seata-nacos-docker.html", - "meta": { - "title": "Docker部署Seata与Nacos整合", - "keywords": "Seata,Nacos,分布式事务", - "description": "本文讲述如何使用Seata整合Nacos配置的Docker部署", - "author": "FUNKYE", - "date": "2019/12/03" - } - }, - { - "filename": "springboot-dubbo-mybatisplus-seata.md", - "link": "/zh-cn/blog/springboot-dubbo-mybatisplus-seata.html", - "meta": { - "title": "SpringBoot+Dubbo+MybatisPlus整合seata分布式事务", - "keywords": "Seata,dubbo,mybatis,分布式事务", - "description": "本文讲述如何将springboot+dubbo+mybatisplus整合seata直连方式搭建", - "author": "FUNKYE", - "date": "2019/11/29" - } - }, - { - "filename": "tcc-mode-applicable-scenario-analysis.md", - "link": "/zh-cn/blog/tcc-mode-applicable-scenario-analysis.html", - "meta": { - "title": "TCC适用模型与适用场景分析", - "author": "zhangthen", - "date": "2019/03/27", - "keywords": "seata、分布式事务、TCC、roadmap" - } - }, - { - "filename": "tcc-mode-design-principle.md", - "link": 
"/zh-cn/blog/tcc-mode-design-principle.html", - "meta": { - "title": "TCC 理论及设计实现指南介绍", - "author": "zhangthen", - "date": "2019/03/26", - "keywords": "fescar、分布式事务、TCC、roadmap" - } - } - ] -} \ No newline at end of file diff --git a/md_json/docs.json b/md_json/docs.json deleted file mode 100644 index ed9462c0..00000000 --- a/md_json/docs.json +++ /dev/null @@ -1,506 +0,0 @@ -{ - "en-us": [ - { - "filename": "at-mode.md", - "link": "/en-us/docs/dev/mode/at-mode.html", - "meta": { - "title": "Seata AT Mode", - "keywords": "Seata, AT mode", - "description": "Seata AT mode." - } - }, - { - "filename": "saga-mode.md", - "link": "/en-us/docs/dev/mode/saga-mode.html", - "meta": { - "title": "Seata Saga Mode", - "keywords": "Seata, Saga mode", - "description": "Seata Saga mode." - } - }, - { - "filename": "tcc-mode.md", - "link": "/en-us/docs/dev/mode/tcc-mode.html", - "meta": { - "title": "Seata TCC Mode", - "keywords": "Seata, TCC Mode", - "description": "Seata TCC mode." - } - }, - { - "filename": "seata-mertics.md", - "link": "/en-us/docs/dev/seata-mertics.html", - "meta": { - "title": "Seata Metrics", - "keywords": "Seata, Metrics", - "description": "Seata Metrics." - } - }, - { - "filename": "label-an-issue-guide_dev.md", - "link": "/en-us/docs/developers/committer-guide/label-an-issue-guide_dev.html", - "meta": { - "title": "Label an Issue", - "keywords": "Seata", - "description": "Label an Issue." - } - }, - { - "filename": "release-guide_dev.md", - "link": "/en-us/docs/developers/committer-guide/release-guide_dev.html", - "meta": {} - }, - { - "filename": "website-guide_dev.md", - "link": "/en-us/docs/developers/committer-guide/website-guide_dev.html", - "meta": { - "title": "Website Guide", - "keywords": "Seata", - "description": "Website Guide." - } - }, - { - "filename": "new-contributor-guide_dev.md", - "link": "/en-us/docs/developers/contributor-guide/new-contributor-guide_dev.html", - "meta": { - "title": "New contributor guide", - "keywords": "Seata, contributor", - "description": "This is a guide for new comers who wants to contribute to Seata." - } - }, - { - "filename": "reporting-security-issues_dev.md", - "link": "/en-us/docs/developers/contributor-guide/reporting-security-issues_dev.html", - "meta": { - "title": "New contributor guide", - "keywords": "Seata, contributor", - "description": "This is a guide for new comers who wants to contribute to Seata." - } - }, - { - "filename": "test-coverage-guide_dev.md", - "link": "/en-us/docs/developers/contributor-guide/test-coverage-guide_dev.html", - "meta": { - "title": "Test coverage guide", - "keywords": "Seata, coverage", - "description": "Test coverage guide." - } - }, - { - "filename": "developers_dev.md", - "link": "/en-us/docs/developers/developers_dev.html", - "meta": { - "title": "Developers", - "keywords": "Seata, Developers", - "description": "Seata Team." - } - }, - { - "filename": "guide_dev.md", - "link": "/en-us/docs/developers/guide_dev.html", - "meta": { - "title": "Contributing to Seata", - "keywords": "Seata", - "description": "It is warmly welcomed if you have interest to hack on Seata. First, we encourage this kind of willing very much. And here is a list of contributing guide for you." 
- } - }, - { - "filename": "deploy-by-docker.md", - "link": "/en-us/docs/ops/deploy-by-docker.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Docker", - "keywords": "docker", - "description": "Deploy Seata Server By Docker", - "author": "helloworlde", - "date": "2019-11-25" - } - }, - { - "filename": "deploy-by-helm.md", - "link": "/en-us/docs/ops/deploy-by-helm.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Helm", - "keywords": "kubernetes,helm,ops", - "description": "Deploy Seata Server By Helm", - "author": "helloworlde", - "date": "2019-12-01" - } - }, - { - "filename": "deploy-by-kubernetes.md", - "link": "/en-us/docs/ops/deploy-by-kubernetes.html", - "meta": { - "hidden": "true", - "title": "Deploy Seata Server By Kubernetes", - "keywords": "kubernetes,ops", - "description": "Deploy Seata Server By Kubernetes", - "author": "helloworlde", - "date": "2019-12-01" - } - }, - { - "filename": "deploy-server.md", - "link": "/en-us/docs/ops/deploy-server.html", - "meta": { - "title": "Deploy Server", - "keywords": "Seata", - "description": "The server can deploy by multiple method: Directly, Docker, Docker-Compose, Kubernetes, Helm." - } - }, - { - "filename": "multi-configuration-isolation.md", - "link": "/en-us/docs/ops/multi-configuration-isolation.html", - "meta": { - "title": "Multi-configuration Isolation", - "keywords": "Seata", - "description": "Seata supports Multi-configuration Isolation since 0.6.1,You can configure it in the following steps." - } - }, - { - "filename": "faq.md", - "link": "/en-us/docs/overview/faq.html", - "meta": { - "title": "Seata FAQ", - "keywords": "Seata", - "description": "Seata FAQ." - } - }, - { - "filename": "terminology.md", - "link": "/en-us/docs/overview/terminology.html", - "meta": { - "title": "Seata Terminology", - "keywords": "Seata", - "description": "Seata Terminology." - } - }, - { - "filename": "what-is-seata.md", - "link": "/en-us/docs/overview/what-is-seata.html", - "meta": { - "title": "What Is Seata", - "keywords": "Seata", - "description": "Seata is an open source distributed transaction solution dedicated to providing high performance and easy to use distributed transaction services. Seata will provide users with AT, TCC, SAGA, and XA transaction models to create a one-stop distributed solution for users." - } - }, - { - "filename": "api.md", - "link": "/en-us/docs/user/api.html", - "meta": { - "title": "Api Guide", - "keywords": "Seata", - "description": "Api Guide." - } - }, - { - "filename": "microservice.md", - "link": "/en-us/docs/user/microservice.html", - "meta": { - "title": "Microservice Framework Guide", - "keywords": "Seata", - "description": "Microservice Framework Guide." - } - }, - { - "filename": "quickstart.md", - "link": "/en-us/docs/user/quickstart.html", - "meta": { - "title": "Quick Start", - "keywords": "Seata", - "description": "Let's begin with a Microservices example." 
- } - } - ], - "zh-cn": [ - { - "filename": "at-mode.md", - "link": "/zh-cn/docs/dev/mode/at-mode.html", - "meta": { - "title": "Seata AT 模式", - "keywords": "Seata", - "description": "Seata AT 模式。" - } - }, - { - "filename": "saga-mode.md", - "link": "/zh-cn/docs/dev/mode/saga-mode.html", - "meta": { - "title": "Seata Saga 模式", - "keywords": "Seata", - "description": "Seata Saga 模式。" - } - }, - { - "filename": "tcc-mode.md", - "link": "/zh-cn/docs/dev/mode/tcc-mode.html", - "meta": { - "title": "Seata Tcc 模式", - "keywords": "Seata", - "description": "Seata Tcc 模式。" - } - }, - { - "filename": "seata-mertics.md", - "link": "/zh-cn/docs/dev/seata-mertics.html", - "meta": { - "title": "Metrics", - "keywords": "Seata", - "description": "Metrics。" - } - }, - { - "filename": "label-an-issue-guide_dev.md", - "link": "/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html", - "meta": { - "title": "给问题打标签", - "keywords": "Seata", - "description": "如果您正在处理一个问题,请记得给这个问题标记一个或者多个您认为有意义的标签。有了标签,其他开发人员就会很轻松地识别出问题,以便对其进行分类并跟踪进度。" - } - }, - { - "filename": "release-guide_dev.md", - "link": "/zh-cn/docs/developers/committer-guide/release-guide_dev.html", - "meta": { - "title": "Seata版本向导", - "keywords": "Seata", - "description": "Seata版本向导。" - } - }, - { - "filename": "website-guide_dev.md", - "link": "/zh-cn/docs/developers/committer-guide/website-guide_dev.html", - "meta": { - "title": "网站向导", - "keywords": "Seata", - "description": "Seata 网站向导。" - } - }, - { - "filename": "new-contributor-guide_dev.md", - "link": "/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html", - "meta": { - "title": "新贡献者向导", - "keywords": "Seata", - "description": "这篇向导旨在给正在准备向Seata提交贡献的新手提供指导。" - } - }, - { - "filename": "reporting-security-issues_dev.md", - "link": "/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html", - "meta": { - "title": "报告安全问题", - "keywords": "Seata", - "description": "Seata在消除其软件项目中的安全性问题方面采取严格的立场,对与其功能和特性有关的问题非常敏感并很快提出。" - } - }, - { - "filename": "test-coverage-guide_dev.md", - "link": "/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html", - "meta": { - "title": "测试覆盖率向导", - "keywords": "Seata", - "description": "测试覆盖率向导。" - } - }, - { - "filename": "developers_dev.md", - "link": "/zh-cn/docs/developers/developers_dev.html", - "meta": { - "title": "Seata 维护者", - "keywords": "Seata, 维护者", - "description": "Seata 维护者名单" - } - }, - { - "filename": "guide_dev.md", - "link": "/zh-cn/docs/developers/guide_dev.html", - "meta": { - "title": "为Seata贡献", - "keywords": "Seata", - "description": "如果您有兴趣攻克Seata,欢迎您。首先,我们非常鼓励这种意愿。这是为您提供帮助的列表。" - } - }, - { - "filename": "config-center.md", - "link": "/zh-cn/docs/ops/config-center.html", - "meta": { - "title": "配置中心初始化", - "keywords": "Seata", - "description": "配置中心初始化。" - } - }, - { - "filename": "deploy-by-docker.md", - "link": "/zh-cn/docs/ops/deploy-by-docker.html", - "meta": { - "hidden": "true", - "title": "使用 Docker 部署 Seata Server", - "keywords": "docker,docker-compose,ops", - "description": "使用 Docker 部署 Seata Server", - "author": "helloworlde", - "date": "2019-11-25" - } - }, - { - "filename": "deploy-by-helm.md", - "link": "/zh-cn/docs/ops/deploy-by-helm.html", - "meta": { - "hidden": "true", - "title": "使用 Helm 部署 Seata Server", - "keywords": "kubernetes,helm,ops", - "description": "使用 Helm 部署 Seata Server", - "author": "helloworlde", - "date": "2019-12-01" - } - }, - { - "filename": "deploy-by-kubernetes.md", - "link": "/zh-cn/docs/ops/deploy-by-kubernetes.html", - "meta": { - 
"hidden": "true", - "title": "使用 Kubernetes 部署 Seata Server", - "keywords": "kubernetes,ops", - "description": "使用 Kubernetes 部署 Seata Server", - "author": "helloworlde", - "date": "2019-12-01" - } - }, - { - "filename": "deploy-guide-beginner.md", - "link": "/zh-cn/docs/ops/deploy-guide-beginner.html", - "meta": { - "title": "Seata部署指南", - "keywords": "Seata", - "description": "Seata分TC、TM和RM三个角色,TC(Server端)为单独服务端部署,TM和RM(Client端)由业务系统集成。" - } - }, - { - "filename": "deploy-server.md", - "link": "/zh-cn/docs/ops/deploy-server.html", - "meta": { - "title": "部署 Server", - "keywords": "Seata", - "description": "Server支持多种方式部署:直接部署,使用 Docker, 使用 Docker-Compose, 使用 Kubernetes, 使用 Helm。" - } - }, - { - "filename": "operation.md", - "link": "/zh-cn/docs/ops/operation.html", - "meta": { - "title": "运维指南", - "keywords": "Seata", - "description": "Seata支持在TC、TM和RM三个角色开启Metrics数据采集并输出到Prometheus监控系统中。" - } - }, - { - "filename": "faq.md", - "link": "/zh-cn/docs/overview/faq.html", - "meta": { - "title": "Seata常见问题", - "keywords": "Seata", - "description": "Seata 常见问题。" - } - }, - { - "filename": "terminology.md", - "link": "/zh-cn/docs/overview/terminology.html", - "meta": { - "title": "Seata术语", - "keywords": "Seata", - "description": "Seata术语。" - } - }, - { - "filename": "what-is-seata.md", - "link": "/zh-cn/docs/overview/what-is-seata.html", - "meta": { - "title": "Seata 是什么", - "keywords": "Seata", - "description": "Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。" - } - }, - { - "filename": "api.md", - "link": "/zh-cn/docs/user/api.html", - "meta": { - "title": "Seata api", - "keywords": "Seata", - "description": "Seata API 分为两大类:High-Level API 和 Low-Level API。" - } - }, - { - "filename": "configurations.md", - "link": "/zh-cn/docs/user/configurations.html", - "meta": { - "title": "Seata 参数配置", - "keywords": "Seata", - "description": "Seata 参数配置。" - } - }, - { - "filename": "configurations090.md", - "link": "/zh-cn/docs/user/configurations090.html", - "meta": { - "title": "Seata 参数配置 0.9.0版本", - "keywords": "Seata", - "description": "Seata 参数配置 0.9.0版本。" - } - }, - { - "filename": "datasource.md", - "link": "/zh-cn/docs/user/datasource.html", - "meta": { - "title": "Seata 数据源支持", - "keywords": "Seata", - "description": "Seata 数据源支持。" - } - }, - { - "filename": "microservice.md", - "link": "/zh-cn/docs/user/microservice.html", - "meta": { - "title": "Seata 微服务框架支持", - "keywords": "Seata", - "description": "Seata 微服务框架支持。" - } - }, - { - "filename": "ormframework.md", - "link": "/zh-cn/docs/user/ormframework.html", - "meta": { - "title": "Seata ORM框架支持", - "keywords": "Seata", - "description": "Seata ORM框架支持。" - } - }, - { - "filename": "quickstart.md", - "link": "/zh-cn/docs/user/quickstart.html", - "meta": { - "title": "Seata 快速开始", - "keywords": "Seata", - "description": "Seata 快速开始。" - } - }, - { - "filename": "saga.md", - "link": "/zh-cn/docs/user/saga.html", - "meta": { - "title": "Seata Saga 模式", - "keywords": "Seata", - "description": "Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。" - } - }, - { - "filename": "spring.md", - "link": "/zh-cn/docs/user/spring.html", - "meta": { - "title": "Seata Spring支持", - "keywords": "Seata", - "description": "Seata Spring支持。" - } - } - ] -} \ No newline at end of file diff --git a/zh-cn/blog/design-more-flexable-application-by-saga.html b/zh-cn/blog/design-more-flexable-application-by-saga.html deleted file mode 100644 index 6010f014..00000000 --- 
a/zh-cn/blog/design-more-flexable-application-by-saga.html +++ /dev/null @@ -1,506 +0,0 @@ - - - - - - - - - - 基于 Seata Saga 设计更有弹性的金融应用 - - - - -

基于 Seata Saga 设计更有弹性的金融应用

-

Seata 意为:Simple Extensible Autonomous Transaction Architecture,是一套一站式分布式事务解决方案,提供了 AT、TCC、Saga 和 XA 事务模式,本文详解其中的 Saga 模式。
项目地址:https://github.com/seata/seata

-

本文作者:屹远(陈龙),蚂蚁金服分布式事务核心研发,Seata Committer。

-

-

金融分布式应用开发的痛点

-

分布式系统有一个比较明显的问题就是,一个业务流程需要组合一组服务。这样的事情在微服务下就更为明显了,因为这需要业务上的一致性的保证。也就是说,如果一个步骤失败了,那么要么回滚到以前的服务调用,要么不断重试保证所有的步骤都成功。---《左耳听风-弹力设计之“补偿事务”》

-

而在金融领域微服务架构下的业务流程往往会更复杂,流程很长,比如一个互联网微贷业务流程调十几个服务很正常,再加上异常处理的流程那就更复杂了,做过金融业务开发的同学会很有体感。

-

所以在金融分布式应用开发过程中我们面临一些痛点:

-
    -
  • 业务一致性难以保障
  • -
-

我们接触到的大多数业务(比如在渠道层、产品层、集成层的系统),为了保障业务最终一致性,往往会采用“补偿”的方式来做,如果没有一个协调器来支持,开发难度是比较大的,每一步都要在 catch 里去处理前面所有的“回滚”操作,这将会形成“箭头形”的代码,可读性及维护性差。或者重试异常的操作,如果重试不成功可能要转异步重试,甚至最后转人工处理。这些都给开发人员带来极大的负担,开发效率低,且容易出错。

-
    -
  • 业务状态难以管理
  • -
-

业务实体很多、实体的状态也很多,往往做完一个业务活动后就将实体的状态更新到了数据库里,没有一个状态机来管理整个状态的变迁过程,不直观,容易出错,造成业务进入一个不正确的状态。

-
    -
  • 幂等性难以保障
  • -
-

服务的幂等性是分布式环境下的基本要求,为了保证服务的幂等性往往需要服务开发者逐个去设计,有用数据库唯一键实现的,有用分布式缓存实现的,没有一个统一的方案,开发人员负担大,也容易遗漏,从而造成资损。

-
    -
  • 业务监控运维难,缺乏统一的差错守护能力
  • -
-

业务的执行情况监控一般通过打印日志,再基于日志监控平台查看,大多数情况是没有问题的,但是如果业务出错,这些监控缺乏当时的业务上下文,对排查问题不友好,往往需要再去数据库里查。同时日志的打印也依赖于开发,容易遗漏。对于补偿事务往往需要有“差错守护触发补偿”、“工人触发补偿”操作,没有统一的差错守护和处理规范,这些都要开发者逐个开发,负担沉重。

-

-

理论基础

-

一些场景下,我们对数据有强一致性的需求时,会采用在业务层上需要使用“两阶段提交”这样的分布式事务方案。而在另外一些场景下,我们并不需要这么强的一致性,那就只需要保证最终一致性就可以了。

-

例如蚂蚁金服目前在金融核心系统使用的就是 TCC 模式,金融核心系统的特点是一致性要求高(业务上的隔离性)、短流程、并发高。

-

而在很多金融核心以上的业务(比如在渠道层、产品层、集成层的系统),这些系统的特点是最终一致即可、流程多、流程长、还可能要调用其它公司的服务(如金融网络)。这时如果每个服务都开发 Try、Confirm、Cancel 三个方法成本高。如果事务中有其它公司的服务,也无法要求其它公司的服务也遵循 TCC 这种开发模式。同时流程长,事务边界太长会影响性能。

-

对于事务我们都知道 ACID,也很熟悉 CAP 理论最多只能满足其中两个,所以,为了提高性能,出现了 ACID 的一个变种 BASE。ACID 强调的是一致性(CAP 中的 C),而 BASE 强调的是可用性(CAP 中的 A)。我们知道,在很多情况下,我们是无法做到强一致性的 ACID 的。特别是我们需要跨多个系统的时候,而且这些系统还不是由一个公司所提供的。BASE 的系统倾向于设计出更加有弹力的系统,在短时间内,就算是有数据不同步的风险,我们也应该允许新的交易可以发生,而后面我们在业务上将可能出现问题的事务通过补偿的方式处理掉,以保证最终的一致性。

-

所以我们在实际开发中会进行取舍,对于更多的金融核心以上的业务系统可以采用补偿事务,补偿事务处理方面在30年前就提出了 Saga 理论,随着微服务的发展,近些年才逐步受到大家的关注。目前业界比较也公认 Saga 是作为长事务的解决方案。

-
-

https://github.com/aphyr/dist-sagas/blob/master/sagas.pdf[1] -http://microservices.io/patterns/data/saga.html[2]

-
-

-

社区和业界的方案

-

-

Apache Camel Saga

-

Camel 是实现 EIP(Enterprise Integration Patterns)企业集成模式的一款开源产品,它基于事件驱动的架构,有着良好的性能和吞吐量,它在2.21版本新增加了 Saga EIP。

-

Saga EIP 提供了一种方式可以通过 camel route 定义一系列有关联关系的 Action,这些 Action 要么都执行成功,要么都回滚,Saga 可以协调任何通讯协议的分布式服务或本地服务,并达到全局的最终一致性。Saga 不要求整个处理在短时间内完成,因为它不占用任何数据库锁,它可以支持需要长时间处理的请求,从几秒到几天,Camel 的 Saga EIP 是基于 Microprofile 的 LRA[3](Long Running Action),同样也是支持协调任何通讯协议任何语言实现的分布式服务。

-

Saga 的实现不会对数据进行加锁,而是在给操作定义它的“补偿操作”,当正常流程执行出错的时候触发那些已经执行过的操作的“补偿操作”,将流程回滚掉。“补偿操作”可以在 Camel route 上用 Java 或 XML DSL(Definition Specific Language)来定义。

-

下面是一个 Java DSL 示例:

-
// action
-from("direct:reserveCredit")
-  .bean(idService, "generateCustomId") // generate a custom Id and set it in the body
-  .to("direct:creditReservation")
-
-// delegate action
-from("direct:creditReservation")
-  .saga()
-  .propagation(SagaPropagation.SUPPORTS)
-  .option("CreditId", body()) // mark the current body as needed in the compensating action
-  .compensation("direct:creditRefund")
-    .bean(creditService, "reserveCredit")
-    .log("Credit ${header.amount} reserved. Custom Id used is ${body}");
-
-// called only if the saga is cancelled
-from("direct:creditRefund")
-  .transform(header("CreditId")) // retrieve the CreditId option from headers
-  .bean(creditService, "refundCredit")
-  .log("Credit for Custom Id ${body} refunded");
-
-

XML DSL 示例:

-
<route>
-  <from uri="direct:start"/>
-  <saga>
-    <compensation uri="direct:compensation" />
-    <completion uri="direct:completion" />
-    <option optionName="myOptionKey">
-      <constant>myOptionValue</constant>
-    </option>
-    <option optionName="myOptionKey2">
-      <constant>myOptionValue2</constant>
-    </option>
-  </saga>
-  <to uri="direct:action1" />
-  <to uri="direct:action2" />
-</route>
-
-

-

Eventuate Tram Saga

-

Eventuate Tram Saga[4] 框架是使用 JDBC / JPA 的 Java 微服务的一个 Saga 框架。它也和 Camel Saga 一样采用了 Java DSL 来定义补偿操作:

-
public class CreateOrderSaga implements SimpleSaga<CreateOrderSagaData> {
-
-  private SagaDefinition<CreateOrderSagaData> sagaDefinition =
-          step()
-            .withCompensation(this::reject)
-          .step()
-            .invokeParticipant(this::reserveCredit)
-          .step()
-            .invokeParticipant(this::approve)
-          .build();
-
-
-  @Override
-  public SagaDefinition<CreateOrderSagaData> getSagaDefinition() {
-    return this.sagaDefinition;
-  }
-
-
-  private CommandWithDestination reserveCredit(CreateOrderSagaData data) {
-    long orderId = data.getOrderId();
-    Long customerId = data.getOrderDetails().getCustomerId();
-    Money orderTotal = data.getOrderDetails().getOrderTotal();
-    return send(new ReserveCreditCommand(customerId, orderId, orderTotal))
-            .to("customerService")
-            .build();
-
-...
-
-

-

Apache ServiceComb Saga

-

ServiceComb Saga[5] 也是一个微服务应用的数据最终一致性解决方案。相对于 TCC 而言,在 try 阶段,Saga 会直接提交事务,后续 rollback 阶段则通过反向的补偿操作来完成。与前面两种不同是它是采用 Java 注解+拦截器的方式来进行“补偿”服务的定义。

-

-

架构:

-

Saga 是由 alpha 和 **omega **组成,其中:

-
    -
  • alpha 充当协调者的角色,主要负责对事务进行管理和协调;
  • -
  • omega 是微服务中内嵌的一个 agent,负责对网络请求进行拦截并向 alpha 上报事务事件;
  • -
-

下图展示了 alpha,omega 以及微服务三者的关系:
-ServiceComb Saga

-

-

使用示例:

-
public class ServiceA extends AbsService implements IServiceA {
-
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  @Autowired
-  private IServiceB serviceB;
-
-  @Autowired
-  private IServiceC serviceC;
-
-  @Override
-  public String getServiceName() {
-    return "servicea";
-  }
-
-  @Override
-  public String getTableName() {
-    return "testa";
-  }
-
-  @Override
-  @SagaStart
-  @Compensable(compensationMethod = "cancelRun")
-  @Transactional(rollbackFor = Exception.class)
-  public Object run(InvokeContext invokeContext) throws Exception {
-    LOG.info("A.run called");
-    doRunBusi();
-    if (invokeContext.isInvokeB(getServiceName())) {
-      serviceB.run(invokeContext);
-    }
-    if (invokeContext.isInvokeC(getServiceName())) {
-      serviceC.run(invokeContext);
-    }
-    if (invokeContext.isException(getServiceName())) {
-      LOG.info("A.run exception");
-      throw new Exception("A.run exception");
-    }
-    return null;
-  }
-
-  public void cancelRun(InvokeContext invokeContext) {
-    LOG.info("A.cancel called");
-    doCancelBusi();
-  }
-
-

-

蚂蚁金服的实践

-

蚂蚁金服内部大规模在使用 TCC 模式分布式事务,主要用于金融核心等对一致性要求高、性能要求高的场景。在更上层的业务系统因为流程多流程长,开发 TCC 成本比较高,大都会权衡采用 Saga 模式来到达业务最终一致性,由于历史的原因不同的 BU 有自己的一套“补偿”事务的方案,基本上是两种:

-
    -
  • 一种是当一个服务在失败时需要“重试”或“补偿”时,在执行服务前在数据库插入一条记录,记录状态,当异常时通过定时任务去查询数据库记录并进行“重试”或“补偿”,当业务流程执行成功则删除记录;
  • -
  • 另一种是设计一个状态机引擎和简单的 DSL,编排业务流程和记录业务状态,状态机引擎可以定义“补偿服务”,当异常时由状态机引擎反向调用“补偿服务”进行回滚,同时还会有一个“差错守护”平台,监控那些执行失败或补偿失败的业务流水,并不断进行“补偿”或“重试”;
  • -
-

-

方案对比

-

社区和业界的解决方案一般是两种,一种基本状态机或流程引擎通过 DSL 方式编排流程程和补偿定义,一种是基于 Java 注解+拦截器实现补偿,那么这两种方案有什么优缺点呢?

- - - - - - - - - - - - - - - - - - - - -
方式优点缺点
状态机+DSL
- 可以用可视化工具来定义业务流程,标准化,可读性高,可实现服务编排的功能
- 提高业务分析人员与程序开发人员的沟通效率
- 业务状态管理:流程本质就是一个状态机,可以很好的反映业务状态的流转
- 提高异常处理灵活性:可以实现宕机恢复后的“向前重试”或“向后补偿”
- 天然可以使用 Actor 模型或 SEDA 架构等异步处理引擎来执行,提高整体吞吐量

- 业务流程实际是由 JAVA 程序与 DSL 配置组成,程序与配置分离,开发起来比较繁琐
- 如果是改造现有业务,对业务侵入性高
- 引擎实现成本高
拦截器+java 注解
- 程序与注解是在一起的,开发简单,学习成本低
- 方便接入现有业务
- 基于动态代理拦截器,框架实现成本低

- 框架无法提供 Actor 模型或 SEDA 架构等异步处理模式来提高系统吞吐量
- 框架无法提供业务状态管理
- 难以实现宕机恢复后的“向前重试”,因为无法恢复线程上下文
-

-

-

-

Seata Saga 的方案

-

Seata Saga 的简介可以看一下《Seata Saga 官网文档》[6]。

-

Seata Saga 采用了状态机+DSL 方案来实现,原因有以下几个:

-
    -
  • 状态机+DSL 方案在实际生产中应用更广泛;
  • -
  • 可以使用 Actor 模型或 SEDA 架构等异步处理引擎来执行,提高整体吞吐量;
  • -
  • 通常在核心系统以上层的业务系统会伴随有“服务编排”的需求,而服务编排又有事务最终一致性要求,两者很难分割开,状态机+DSL 方案可以同时满足这两个需求;
  • -
  • 由于 Saga 模式在理论上是不保证隔离性的,在极端情况下可能由于脏写无法完成回滚操作,比如举一个极端的例子, 分布式事务内先给用户 A 充值,然后给用户 B 扣减余额,如果在给A用户充值成功,在事务提交以前,A 用户把钱消费掉了,如果事务发生回滚,这时则没有办法进行补偿了,有些业务场景可以允许让业务最终成功,在回滚不了的情况下可以继续重试完成后面的流程,状态机+DSL的方案可以实现“向前”恢复上下文继续执行的能力, 让业务最终执行成功,达到最终一致性的目的。
  • -
-
-

在不保证隔离性的情况下:业务流程设计时要遵循“宁可长款, 不可短款”的原则,长款意思是客户少了钱机构多了钱,以机构信誉可以给客户退款,反之则是短款,少的钱可能追不回来了。所以在业务流程设计上一定是先扣款。

-
-

-

状态定义语言(Seata State Language)

-
    -
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件;
  2. -
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点;
  4. -
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚;
  6. -
-
-

注意: 异常发生时是否进行补偿也可由用户自定义决定

-
-
    -
  1. 可以实现服务编排需求,支持单项选择、并发、异步、子状态机、参数转换、参数映射、服务执行状态判断、异常捕获等功能;
  2. -
-

假设有一个业务流程要调两个服务,先调库存扣减(InventoryService),再调余额扣减(BalanceService),保证在一个分布式内要么同时成功,要么同时回滚。两个参与者服务都有一个 reduce 方法,表示库存扣减或余额扣减,还有一个 compensateReduce 方法,表示补偿扣减操作。以 InventoryService 为例看一下它的接口定义:

-
public interface InventoryService {
-
-    /**
-     * reduce
-     * @param businessKey
-     * @param amount
-     * @param params
-     * @return
-     */
-    boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params);
-
-    /**
-     * compensateReduce
-     * @param businessKey
-     * @param params
-     * @return
-     */
-    boolean compensateReduce(String businessKey, Map<String, Object> params);
-}
-
-

这个业务流程对应的状态图:

-

示例状态图 -
对应的 JSON

-
{
-    "Name": "reduceInventoryAndBalance",
-    "Comment": "reduce inventory then reduce balance in a transaction",
-    "StartState": "ReduceInventory",
-    "Version": "0.0.1",
-    "States": {
-        "ReduceInventory": {
-            "Type": "ServiceTask",
-            "ServiceName": "inventoryAction",
-            "ServiceMethod": "reduce",
-            "CompensateState": "CompensateReduceInventory",
-            "Next": "ChoiceState",
-            "Input": [
-                "$.[businessKey]",
-                "$.[count]"
-            ],
-            "Output": {
-                "reduceInventoryResult": "$.#root"
-            },
-            "Status": {
-                "#root == true": "SU",
-                "#root == false": "FA",
-                "$Exception{java.lang.Throwable}": "UN"
-            }
-        },
-        "ChoiceState":{
-            "Type": "Choice",
-            "Choices":[
-                {
-                    "Expression":"[reduceInventoryResult] == true",
-                    "Next":"ReduceBalance"
-                }
-            ],
-            "Default":"Fail"
-        },
-        "ReduceBalance": {
-            "Type": "ServiceTask",
-            "ServiceName": "balanceAction",
-            "ServiceMethod": "reduce",
-            "CompensateState": "CompensateReduceBalance",
-            "Input": [
-                "$.[businessKey]",
-                "$.[amount]",
-                {
-                    "throwException" : "$.[mockReduceBalanceFail]"
-                }
-            ],
-            "Output": {
-                "compensateReduceBalanceResult": "$.#root"
-            },
-            "Status": {
-                "#root == true": "SU",
-                "#root == false": "FA",
-                "$Exception{java.lang.Throwable}": "UN"
-            },
-            "Catch": [
-                {
-                    "Exceptions": [
-                        "java.lang.Throwable"
-                    ],
-                    "Next": "CompensationTrigger"
-                }
-            ],
-            "Next": "Succeed"
-        },
-        "CompensateReduceInventory": {
-            "Type": "ServiceTask",
-            "ServiceName": "inventoryAction",
-            "ServiceMethod": "compensateReduce",
-            "Input": [
-                "$.[businessKey]"
-            ]
-        },
-        "CompensateReduceBalance": {
-            "Type": "ServiceTask",
-            "ServiceName": "balanceAction",
-            "ServiceMethod": "compensateReduce",
-            "Input": [
-                "$.[businessKey]"
-            ]
-        },
-        "CompensationTrigger": {
-            "Type": "CompensationTrigger",
-            "Next": "Fail"
-        },
-        "Succeed": {
-            "Type":"Succeed"
-        },
-        "Fail": {
-            "Type":"Fail",
-            "ErrorCode": "PURCHASE_FAILED",
-            "Message": "purchase failed"
-        }
-    }
-}
-
-

状态语言在一定程度上参考了 AWS Step Functions[7]。

-

-

"状态机" 属性简介:

-
    -
  • Name: 表示状态机的名称,必须唯一;
  • -
  • Comment: 状态机的描述;
  • -
  • Version: 状态机定义版本;
  • -
  • StartState: 启动时运行的第一个"状态";
  • -
  • States: 状态列表,是一个 map 结构,key 是"状态"的名称,在状态机内必须唯一;
  • -
-

-

"状态" 属性简介:

-
    -
  • Type:"状态" 的类型,比如有: -
      -
    • ServiceTask: 执行调用服务任务;
    • -
    • Choice: 单条件选择路由;
    • -
    • CompensationTrigger: 触发补偿流程;
    • -
    • Succeed: 状态机正常结束;
    • -
    • Fail: 状态机异常结束;
    • -
    • SubStateMachine: 调用子状态机;
    • -
    -
  • -
  • ServiceName: 服务名称,通常是服务的beanId;
  • -
  • ServiceMethod: 服务方法名称;
  • -
  • CompensateState: 该"状态"的补偿"状态";
  • -
  • Input: 调用服务的输入参数列表,是一个数组,对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达使用的 SpringEL[8], 如果是常量直接写值即可;
  • -
  • Output: 将服务返回的参数赋值到状态机上下文中,是一个 map 结构,key 为放入到状态机上文时的 key(状态机上下文也是一个 map),value 中 $. 是表示 SpringEL 表达式,表示从服务的返回参数中取值,#root 表示服务的整个返回参数;
  • -
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知,我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个 map 结构,key 是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是 SpringEL 表达式判断服务返回参数,带 $Exception{开头表示判断异常类型,value 是当这个条件表达式成立时则将服务执行状态映射成这个值;
  • -
  • Catch: 捕获到异常后的路由;
  • -
  • Next: 服务执行完成后下一个执行的"状态";
  • -
  • Choices: Choice 类型的"状态"里, 可选的分支列表, 分支中的 Expression 为 SpringEL 表达式,Next 为当表达式成立时执行的下一个"状态";
  • -
  • ErrorCode: Fail 类型"状态"的错误码;
  • -
  • Message: Fail 类型"状态"的错误信息;
  • -
-

更多详细的状态语言解释请看《Seata Saga 官网文档》[6]。

-

-

状态机引擎原理:

-

状态机引擎原理

-
    -
  • 图中的状态图是先执行 stateA, 再执行 stateB,然后执行 stateC;
  • -
  • "状态"的执行是基于事件驱动的模型,stataA 执行完成后,会产生路由消息放入 EventQueue,事件消费端从 EventQueue 取出消息,执行 stateB;
  • -
  • 在整个状态机启动时会调用 Seata Server 开启分布式事务,并生成 xid, 然后记录"状态机实例"启动事件到本地数据库(状态机启动调用的一个假设示例见本列表之后的代码);
  • -
  • 当执行到一个"状态"时会调用 Seata Server 注册分支事务,并生产 branchId, 然后记录"状态实例"开始执行事件到本地数据库;
  • -
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用 Seata Server 上报分支事务的状态;
  • -
  • 当整个状态机执行完成,会记录"状态机实例"执行完成事件到本地数据库, 然后调用 Seata Server 提交或回滚分布式事务;
  • -
-
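结合上面的引擎原理,下面给出一个启动状态机的示意代码。这只是一个基于假设的最小示例:假设状态机引擎已经通过 Spring 装配完成,入口接口为 io.seata.saga.engine.StateMachineEngine,start 方法的具体签名请以官方文档和源码为准;startParams 中的参数名对应上文 JSON 定义里 Input 表达式用到的 key。

import io.seata.saga.engine.StateMachineEngine;
import io.seata.saga.statelang.domain.StateMachineInstance;

import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;

public class PurchaseService {

    // 假设:StateMachineEngine 由 Spring 配置并注入,接口与方法签名以 Seata 官方文档为准
    private StateMachineEngine stateMachineEngine;

    public void purchase(String businessKey) {
        Map<String, Object> startParams = new HashMap<>();
        startParams.put("businessKey", businessKey);
        startParams.put("count", 10);
        startParams.put("amount", new BigDecimal("100"));

        // 启动上文 JSON 中定义的 reduceInventoryAndBalance 状态机:
        // 引擎会开启全局事务、按事件驱动依次执行各个 ServiceTask,异常时反向执行补偿节点
        StateMachineInstance instance =
                stateMachineEngine.start("reduceInventoryAndBalance", null, startParams);

        System.out.println("state machine execution status: " + instance.getStatus());
    }
}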

-

状态机引擎设计:

-

状态机引擎设计

-

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

-
    -
  • -

    Eventing 层:

    -
      -
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么、消费端执行什么,由上层实现;
    • -
    -
  • -
  • -

    ProcessController 层:

    -
      -
    • 基于下层的 Eventing 层驱动一个“空”流程的执行,"state"的行为和路由都未实现,由上层实现;
    • -
    -
  • -
-
-

基于以上两层理论上可以自定义扩展任何"流程"引擎。这两层的设计是参考了内部金融网络平台的设计。

-
-
    -
  • StateMachineEngine 层: -
      -
    • 实现状态机引擎每种 state 的行为和路由逻辑;
    • -
    • 提供 API、状态机语言仓库;
    • -
    -
  • -
-

-

Saga 模式下服务设计的实践经验

-

下面是实践中总结的在 Saga 模式下微服务设计的一些经验,当然这是推荐做法,并不是说一定要 100% 遵循,没有遵循也有“绕过”方案。

-
-

好消息:Seata Saga 模式对微服务的接口参数没有任何要求,这使得 Saga 模式可用于集成遗留系统或外部机构的服务。

-
-

-

允许空补偿

-
    -
  • 空补偿:原服务未执行,补偿服务执行了;
  • -
  • 出现原因: -
      -
    • 原服务 超时(丢包);
    • -
    • Saga 事务触发 回滚;
    • -
    • 未收到原服务请求,先收到补偿请求;
    • -
    -
  • -
-

所以服务设计时需要允许空补偿,即没有找到要补偿的业务主键时返回补偿成功并将原业务主键记录下来。
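下面是一个"允许空补偿"的补偿服务写法示意(纯属示意的业务代码,InventoryDao、NullCompensationDao 等依赖均为假设,方法签名沿用上文 InventoryService 中的 compensateReduce):

import java.util.Map;

public class InventoryActionImpl {

    private InventoryDao inventoryDao;               // 假设:库存及扣减流水 DAO
    private NullCompensationDao nullCompensationDao; // 假设:空补偿记录 DAO

    public boolean compensateReduce(String businessKey, Map<String, Object> params) {
        if (!inventoryDao.existsDeductRecord(businessKey)) {
            // 空补偿:原服务尚未执行,找不到要补偿的业务主键,
            // 记录该业务主键(供后续防悬挂检查使用)并直接返回补偿成功
            nullCompensationDao.save(businessKey);
            return true;
        }
        // 正常补偿:恢复已扣减的库存
        inventoryDao.restore(businessKey);
        return true;
    }
}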

-

-

防悬挂控制

-
    -
  • 悬挂:补偿服务 比 原服务 先执行;
  • -
  • 出现原因: -
      -
    • 原服务 超时(拥堵);
    • -
    • Saga 事务触发回滚;
    • -
    • 拥堵的原服务到达;
    • -
    -
  • -
-

所以要检查当前业务主键是否已经在空补偿记录下来的业务主键中存在,如果存在则要拒绝服务的执行。
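与上面的空补偿记录配合,原服务在执行前先做防悬挂检查,一个示意写法如下(同样是假设的业务代码,与上一个示例位于同一个类中,nullCompensationDao、inventoryDao 与前例对应):

    public boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params) {
        if (nullCompensationDao.exists(businessKey)) {
            // 防悬挂:该业务主键已被空补偿记录过,说明补偿服务先到,拒绝"迟到"的原服务请求
            return false;
        }
        // 正常执行库存扣减
        inventoryDao.reduce(businessKey, amount);
        return true;
    }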

-

-

幂等控制

-
    -
  • 原服务与补偿服务都需要保证幂等性, 由于网络可能超时,可以设置重试策略,重试发生时要通过幂等控制避免业务数据重复更新(一个简单的示意见本列表之后的代码)。
  • -
-
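把幂等控制与前面的防悬挂检查合在一起,原服务 reduce 方法的一个完整示意如下(覆盖上一个示例;仍然是假设的业务代码,依赖与前两个示例一致):

    public boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params) {
        if (inventoryDao.existsDeductRecord(businessKey)) {
            // 幂等控制:该业务主键已扣减过,说明是重试请求,直接返回上次的执行结果,避免重复更新
            return true;
        }
        if (nullCompensationDao.exists(businessKey)) {
            // 防悬挂:补偿服务已先执行并记录了该业务主键,拒绝迟到的原服务请求
            return false;
        }
        // 正常扣减库存并记录扣减流水,供幂等判断和补偿使用
        inventoryDao.reduce(businessKey, amount);
        return true;
    }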

-

总结

-

很多时候我们并不需要强求强一致性,而是基于 BASE 和 Saga 理论去设计更有弹性的系统,在分布式架构下获得更好的性能和容错能力。分布式架构没有银弹,只有适合特定场景的方案,事实上 Seata Saga 是一个具备“服务编排”和“Saga 分布式事务”能力的产品,总结下来它的适用场景是:

-
    -
  • 适用于微服务架构下的“长事务”处理;
  • -
  • 适用于微服务架构下的“服务编排”需求;
  • -
  • 适用于金融核心系统以上的有大量组合服务的业务系统(比如在渠道层、产品层、集成层的系统);
  • -
  • 适用于业务流程中需要集成遗留系统或外部机构提供的服务的场景(这些服务不可变,不能对其提出改造要求)。
  • -
-

-

文中涉及相关链接

-

[1] https://github.com/aphyr/dist-sagas/blob/master/sagas.pdf
[2] http://microservices.io/patterns/data/saga.html
[3] Microprofile 的 LRA: https://github.com/eclipse/microprofile-sandbox/tree/master/proposals/0009-LRA
[4] Eventuate Tram Saga: https://github.com/eventuate-tram/eventuate-tram-sagas
[5] ServiceComb Saga: https://github.com/apache/servicecomb-pack
[6] Seata Saga 官网文档: http://seata.io/zh-cn/docs/user/saga.html
[7] AWS Step Functions: https://docs.aws.amazon.com/zh_cn/step-functions/latest/dg/tutorial-creating-lambda-state-machine.html
[8] SpringEL: https://docs.spring.io/spring/docs/4.3.10.RELEASE/spring-framework-reference/html/expressions.html

-
- - - - - - - diff --git a/zh-cn/blog/design-more-flexable-application-by-saga.json b/zh-cn/blog/design-more-flexable-application-by-saga.json deleted file mode 100644 index f9e44c40..00000000 --- a/zh-cn/blog/design-more-flexable-application-by-saga.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "design-more-flexable-application-by-saga.md", - "__html": "

基于 Seata Saga 设计更有弹性的金融应用

\n

Seata 意为:Simple Extensible Autonomous Transaction Architecture,是一套一站式分布式事务解决方案,提供了 AT、TCC、Saga 和 XA 事务模式,本文详解其中的 Saga 模式。
项目地址:https://github.com/seata/seata

\n

本文作者:屹远(陈龙),蚂蚁金服分布式事务核心研发,Seata Committer。

\n

\n

金融分布式应用开发的痛点

\n

分布式系统有一个比较明显的问题就是,一个业务流程需要组合一组服务。这样的事情在微服务下就更为明显了,因为这需要业务上的一致性的保证。也就是说,如果一个步骤失败了,那么要么回滚到以前的服务调用,要么不断重试保证所有的步骤都成功。---《左耳听风-弹力设计之“补偿事务”》

\n

而在金融领域微服务架构下的业务流程往往会更复杂,流程很长,比如一个互联网微贷业务流程调十几个服务很正常,再加上异常处理的流程那就更复杂了,做过金融业务开发的同学会很有体感。

\n

所以在金融分布式应用开发过程中我们面临一些痛点:

\n
    \n
  • 业务一致性难以保障
  • \n
\n

我们接触到的大多数业务(比如在渠道层、产品层、集成层的系统),为了保障业务最终一致性,往往会采用“补偿”的方式来做,如果没有一个协调器来支持,开发难度是比较大的,每一步都要在 catch 里去处理前面所有的“回滚”操作,这将会形成“箭头形”的代码,可读性及维护性差。或者重试异常的操作,如果重试不成功可能要转异步重试,甚至最后转人工处理。这些都给开发人员带来极大的负担,开发效率低,且容易出错。

\n
    \n
  • 业务状态难以管理
  • \n
\n

业务实体很多、实体的状态也很多,往往做完一个业务活动后就将实体的状态更新到了数据库里,没有一个状态机来管理整个状态的变迁过程,不直观,容易出错,造成业务进入一个不正确的状态。

\n
    \n
  • 幂等性难以保障
  • \n
\n

服务的幂等性是分布式环境下的基本要求,为了保证服务的幂等性往往需要服务开发者逐个去设计,有用数据库唯一键实现的,有用分布式缓存实现的,没有一个统一的方案,开发人员负担大,也容易遗漏,从而造成资损。

\n
    \n
  • 业务监控运维难,缺乏统一的差错守护能力
  • \n
\n

业务的执行情况监控一般通过打印日志,再基于日志监控平台查看,大多数情况是没有问题的,但是如果业务出错,这些监控缺乏当时的业务上下文,对排查问题不友好,往往需要再去数据库里查。同时日志的打印也依赖于开发,容易遗漏。对于补偿事务往往需要有“差错守护触发补偿”、“人工触发补偿”操作,没有统一的差错守护和处理规范,这些都要开发者逐个开发,负担沉重。

\n

\n

理论基础

\n

一些场景下,我们对数据有强一致性的需求时,会采用在业务层上需要使用“两阶段提交”这样的分布式事务方案。而在另外一些场景下,我们并不需要这么强的一致性,那就只需要保证最终一致性就可以了。

\n

例如蚂蚁金服目前在金融核心系统使用的就是 TCC 模式,金融核心系统的特点是一致性要求高(业务上的隔离性)、短流程、并发高。

\n

而在很多金融核心以上的业务(比如在渠道层、产品层、集成层的系统),这些系统的特点是最终一致即可、流程多、流程长、还可能要调用其它公司的服务(如金融网络)。这时如果每个服务都要开发 Try、Confirm、Cancel 三个方法,成本很高。如果事务中有其它公司的服务,也无法要求其它公司的服务也遵循 TCC 这种开发模式。同时流程长,事务边界太长会影响性能。

\n

对于事务我们都知道 ACID,也很熟悉 CAP 理论最多只能满足其中两个,所以,为了提高性能,出现了 ACID 的一个变种 BASE。ACID 强调的是一致性(CAP 中的 C),而 BASE 强调的是可用性(CAP 中的 A)。我们知道,在很多情况下,我们是无法做到强一致性的 ACID 的。特别是我们需要跨多个系统的时候,而且这些系统还不是由一个公司所提供的。BASE 的系统倾向于设计出更加有弹力的系统,在短时间内,就算是有数据不同步的风险,我们也应该允许新的交易可以发生,而后面我们在业务上将可能出现问题的事务通过补偿的方式处理掉,以保证最终的一致性。

\n

所以我们在实际开发中会进行取舍,对于更多的金融核心以上的业务系统可以采用补偿事务。补偿事务处理方面在30年前就提出了 Saga 理论,随着微服务的发展,近些年才逐步受到大家的关注,目前业界也比较公认 Saga 是长事务的解决方案。

\n
\n

https://github.com/aphyr/dist-sagas/blob/master/sagas.pdf[1]\nhttp://microservices.io/patterns/data/saga.html[2]

\n
\n

\n

社区和业界的方案

\n

\n

Apache Camel Saga

\n

Camel 是实现 EIP(Enterprise Integration Patterns)企业集成模式的一款开源产品,它基于事件驱动的架构,有着良好的性能和吞吐量,它在2.21版本新增加了 Saga EIP。

\n

Saga EIP 提供了一种方式可以通过 camel route 定义一系列有关联关系的 Action,这些 Action 要么都执行成功,要么都回滚,Saga 可以协调任何通讯协议的分布式服务或本地服务,并达到全局的最终一致性。Saga 不要求整个处理在短时间内完成,因为它不占用任何数据库锁,它可以支持需要长时间处理的请求,从几秒到几天,Camel 的 Saga EIP 是基于 Microprofile 的 LRA[3](Long Running Action),同样也是支持协调任何通讯协议任何语言实现的分布式服务。

\n

Saga 的实现不会对数据进行加锁,而是在给操作定义它的“补偿操作”,当正常流程执行出错的时候触发那些已经执行过的操作的“补偿操作”,将流程回滚掉。“补偿操作”可以在 Camel route 上用 Java 或 XML DSL(Definition Specific Language)来定义。

\n

下面是一个 Java DSL 示例:

\n
// action\nfrom(\"direct:reserveCredit\")\n  .bean(idService, \"generateCustomId\") // generate a custom Id and set it in the body\n  .to(\"direct:creditReservation\")\n\n// delegate action\nfrom(\"direct:creditReservation\")\n  .saga()\n  .propagation(SagaPropagation.SUPPORTS)\n  .option(\"CreditId\", body()) // mark the current body as needed in the compensating action\n  .compensation(\"direct:creditRefund\")\n    .bean(creditService, \"reserveCredit\")\n    .log(\"Credit ${header.amount} reserved. Custom Id used is ${body}\");\n\n// called only if the saga is cancelled\nfrom(\"direct:creditRefund\")\n  .transform(header(\"CreditId\")) // retrieve the CreditId option from headers\n  .bean(creditService, \"refundCredit\")\n  .log(\"Credit for Custom Id ${body} refunded\");\n
\n

XML DSL 示例:

\n
<route>\n  <from uri=\"direct:start\"/>\n  <saga>\n    <compensation uri=\"direct:compensation\" />\n    <completion uri=\"direct:completion\" />\n    <option optionName=\"myOptionKey\">\n      <constant>myOptionValue</constant>\n    </option>\n    <option optionName=\"myOptionKey2\">\n      <constant>myOptionValue2</constant>\n    </option>\n  </saga>\n  <to uri=\"direct:action1\" />\n  <to uri=\"direct:action2\" />\n</route>\n
\n

\n

Eventuate Tram Saga

\n

Eventuate Tram Saga[4] 框架是使用 JDBC / JPA 的 Java 微服务的一个 Saga 框架。它也和 Camel Saga 一样采用了 Java DSL 来定义补偿操作:

\n
public class CreateOrderSaga implements SimpleSaga<CreateOrderSagaData> {\n\n  private SagaDefinition<CreateOrderSagaData> sagaDefinition =\n          step()\n            .withCompensation(this::reject)\n          .step()\n            .invokeParticipant(this::reserveCredit)\n          .step()\n            .invokeParticipant(this::approve)\n          .build();\n\n\n  @Override\n  public SagaDefinition<CreateOrderSagaData> getSagaDefinition() {\n    return this.sagaDefinition;\n  }\n\n\n  private CommandWithDestination reserveCredit(CreateOrderSagaData data) {\n    long orderId = data.getOrderId();\n    Long customerId = data.getOrderDetails().getCustomerId();\n    Money orderTotal = data.getOrderDetails().getOrderTotal();\n    return send(new ReserveCreditCommand(customerId, orderId, orderTotal))\n            .to(\"customerService\")\n            .build();\n\n...\n
\n

\n

Apache ServiceComb Saga

\n

ServiceComb Saga[5] 也是一个微服务应用的数据最终一致性解决方案。相对于 TCC 而言,在 try 阶段,Saga 会直接提交事务,后续 rollback 阶段则通过反向的补偿操作来完成。与前面两种不同是它是采用 Java 注解+拦截器的方式来进行“补偿”服务的定义。

\n

\n

架构:

\n

Saga 是由 alpha 和 omega 组成,其中:

\n
    \n
  • alpha 充当协调者的角色,主要负责对事务进行管理和协调;
  • \n
  • omega 是微服务中内嵌的一个 agent,负责对网络请求进行拦截并向 alpha 上报事务事件;
  • \n
\n

下图展示了 alpha,omega 以及微服务三者的关系:
\n\"ServiceComb

\n

\n

使用示例:

\n
public class ServiceA extends AbsService implements IServiceA {\n\n  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\n\n  @Autowired\n  private IServiceB serviceB;\n\n  @Autowired\n  private IServiceC serviceC;\n\n  @Override\n  public String getServiceName() {\n    return \"servicea\";\n  }\n\n  @Override\n  public String getTableName() {\n    return \"testa\";\n  }\n\n  @Override\n  @SagaStart\n  @Compensable(compensationMethod = \"cancelRun\")\n  @Transactional(rollbackFor = Exception.class)\n  public Object run(InvokeContext invokeContext) throws Exception {\n    LOG.info(\"A.run called\");\n    doRunBusi();\n    if (invokeContext.isInvokeB(getServiceName())) {\n      serviceB.run(invokeContext);\n    }\n    if (invokeContext.isInvokeC(getServiceName())) {\n      serviceC.run(invokeContext);\n    }\n    if (invokeContext.isException(getServiceName())) {\n      LOG.info(\"A.run exception\");\n      throw new Exception(\"A.run exception\");\n    }\n    return null;\n  }\n\n  public void cancelRun(InvokeContext invokeContext) {\n    LOG.info(\"A.cancel called\");\n    doCancelBusi();\n  }\n
\n

\n

蚂蚁金服的实践

\n

蚂蚁金服内部大规模在使用 TCC 模式分布式事务,主要用于金融核心等对一致性要求高、性能要求高的场景。在更上层的业务系统因为流程多流程长,开发 TCC 成本比较高,大都会权衡采用 Saga 模式来到达业务最终一致性,由于历史的原因不同的 BU 有自己的一套“补偿”事务的方案,基本上是两种:

\n
    \n
  • 一种是当一个服务在失败时需要“重试”或“补偿”时,在执行服务前在数据库插入一条记录,记录状态,当异常时通过定时任务去查询数据库记录并进行“重试”或“补偿”,当业务流程执行成功则删除记录;
  • \n
  • 另一种是设计一个状态机引擎和简单的 DSL,编排业务流程和记录业务状态,状态机引擎可以定义“补偿服务”,当异常时由状态机引擎反向调用“补偿服务”进行回滚,同时还会有一个“差错守护”平台,监控那些执行失败或补偿失败的业务流水,并不断进行“补偿”或“重试”;
  • \n
\n

\n

方案对比

\n

社区和业界的解决方案一般是两种:一种是基于状态机或流程引擎,通过 DSL 方式编排流程和补偿定义;另一种是基于 Java 注解+拦截器实现补偿。那么这两种方案有什么优缺点呢?

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
方式优点缺点
状态机+DSL
- 可以用可视化工具来定义业务流程,标准化,可读性高,可实现服务编排的功能
- 提高业务分析人员与程序开发人员的沟通效率
- 业务状态管理:流程本质就是一个状态机,可以很好的反映业务状态的流转
- 提高异常处理灵活性:可以实现宕机恢复后的“向前重试”或“向后补偿”
- 天然可以使用 Actor 模型或 SEDA 架构等异步处理引擎来执行,提高整体吞吐量

- 业务流程实际是由 JAVA 程序与 DSL 配置组成,程序与配置分离,开发起来比较繁琐
- 如果是改造现有业务,对业务侵入性高
- 引擎实现成本高
拦截器+java 注解
- 程序与注解是在一起的,开发简单,学习成本低
- 方便接入现有业务
- 基于动态代理拦截器,框架实现成本低

- 框架无法提供 Actor 模型或 SEDA 架构等异步处理模式来提高系统吞吐量
- 框架无法提供业务状态管理
- 难以实现宕机恢复后的“向前重试”,因为无法恢复线程上下文
\n

\n

\n

\n

Seata Saga 的方案

\n

Seata Saga 的简介可以看一下《Seata Saga 官网文档》[6]。

\n

Seata Saga 采用了状态机+DSL 方案来实现,原因有以下几个:

\n
    \n
  • 状态机+DSL 方案在实际生产中应用更广泛;
  • \n
  • 可以使用 Actor 模型或 SEDA 架构等异步处理引擎来执行,提高整体吞吐量;
  • \n
  • 通常在核心系统以上层的业务系统会伴随有“服务编排”的需求,而服务编排又有事务最终一致性要求,两者很难分割开,状态机+DSL 方案可以同时满足这两个需求;
  • \n
  • 由于 Saga 模式在理论上是不保证隔离性的,在极端情况下可能由于脏写无法完成回滚操作。比如举一个极端的例子, 分布式事务内先给用户 A 充值,然后给用户 B 扣减余额,如果给 A 用户充值成功后,在事务提交以前,A 用户把钱消费掉了,如果事务发生回滚,这时则没有办法进行补偿了。有些业务场景可以允许让业务最终成功,在回滚不了的情况下可以继续重试完成后面的流程,状态机+DSL 的方案可以实现“向前”恢复上下文继续执行的能力, 让业务最终执行成功,达到最终一致性的目的。
  • \n
\n
\n

在不保证隔离性的情况下:业务流程设计时要遵循“宁可长款, 不可短款”的原则,长款意思是客户少了钱、机构多了钱,以机构信誉可以给客户退款;反之则是短款,少的钱可能追不回来了。所以在业务流程设计上一定是先扣款。

\n
\n

\n

状态定义语言(Seata State Language)

\n
    \n
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件;
  2. \n
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点;
  4. \n
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚;
  6. \n
\n
\n

注意: 异常发生时是否进行补偿也可由用户自定义决定

\n
\n
    \n
  1. 可以实现服务编排需求,支持单项选择、并发、异步、子状态机、参数转换、参数映射、服务执行状态判断、异常捕获等功能;
  2. \n
\n

假设有一个业务流程要调两个服务,先调库存扣减(InventoryService),再调余额扣减(BalanceService),保证在一个分布式事务内要么同时成功,要么同时回滚。两个参与者服务都有一个 reduce 方法,表示库存扣减或余额扣减,还有一个 compensateReduce 方法,表示补偿扣减操作。以 InventoryService 为例看一下它的接口定义:

\n
public interface InventoryService {\n\n    /**\n     * reduce\n     * @param businessKey\n     * @param amount\n     * @param params\n     * @return\n     */\n    boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params);\n\n    /**\n     * compensateReduce\n     * @param businessKey\n     * @param params\n     * @return\n     */\n    boolean compensateReduce(String businessKey, Map<String, Object> params);\n}\n
\n

这个业务流程对应的状态图:

\n

\"示例状态图\"\n
对应的 JSON

\n
{\n    \"Name\": \"reduceInventoryAndBalance\",\n    \"Comment\": \"reduce inventory then reduce balance in a transaction\",\n    \"StartState\": \"ReduceInventory\",\n    \"Version\": \"0.0.1\",\n    \"States\": {\n        \"ReduceInventory\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"inventoryAction\",\n            \"ServiceMethod\": \"reduce\",\n            \"CompensateState\": \"CompensateReduceInventory\",\n            \"Next\": \"ChoiceState\",\n            \"Input\": [\n                \"$.[businessKey]\",\n                \"$.[count]\"\n            ],\n            \"Output\": {\n                \"reduceInventoryResult\": \"$.#root\"\n            },\n            \"Status\": {\n                \"#root == true\": \"SU\",\n                \"#root == false\": \"FA\",\n                \"$Exception{java.lang.Throwable}\": \"UN\"\n            }\n        },\n        \"ChoiceState\":{\n            \"Type\": \"Choice\",\n            \"Choices\":[\n                {\n                    \"Expression\":\"[reduceInventoryResult] == true\",\n                    \"Next\":\"ReduceBalance\"\n                }\n            ],\n            \"Default\":\"Fail\"\n        },\n        \"ReduceBalance\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"balanceAction\",\n            \"ServiceMethod\": \"reduce\",\n            \"CompensateState\": \"CompensateReduceBalance\",\n            \"Input\": [\n                \"$.[businessKey]\",\n                \"$.[amount]\",\n                {\n                    \"throwException\" : \"$.[mockReduceBalanceFail]\"\n                }\n            ],\n            \"Output\": {\n                \"compensateReduceBalanceResult\": \"$.#root\"\n            },\n            \"Status\": {\n                \"#root == true\": \"SU\",\n                \"#root == false\": \"FA\",\n                \"$Exception{java.lang.Throwable}\": \"UN\"\n            },\n            \"Catch\": [\n                {\n                    \"Exceptions\": [\n                        \"java.lang.Throwable\"\n                    ],\n                    \"Next\": \"CompensationTrigger\"\n                }\n            ],\n            \"Next\": \"Succeed\"\n        },\n        \"CompensateReduceInventory\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"inventoryAction\",\n            \"ServiceMethod\": \"compensateReduce\",\n            \"Input\": [\n                \"$.[businessKey]\"\n            ]\n        },\n        \"CompensateReduceBalance\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"balanceAction\",\n            \"ServiceMethod\": \"compensateReduce\",\n            \"Input\": [\n                \"$.[businessKey]\"\n            ]\n        },\n        \"CompensationTrigger\": {\n            \"Type\": \"CompensationTrigger\",\n            \"Next\": \"Fail\"\n        },\n        \"Succeed\": {\n            \"Type\":\"Succeed\"\n        },\n        \"Fail\": {\n            \"Type\":\"Fail\",\n            \"ErrorCode\": \"PURCHASE_FAILED\",\n            \"Message\": \"purchase failed\"\n        }\n    }\n}\n
\n

状态语言在一定程度上参考了 AWS Step Functions[7]。

\n

\n

"状态机" 属性简介:

\n
    \n
  • Name: 表示状态机的名称,必须唯一;
  • \n
  • Comment: 状态机的描述;
  • \n
  • Version: 状态机定义版本;
  • \n
  • StartState: 启动时运行的第一个"状态";
  • \n
  • States: 状态列表,是一个 map 结构,key 是"状态"的名称,在状态机内必须唯一;
  • \n
\n

\n

"状态" 属性简介:

\n
    \n
  • Type:"状态" 的类型,比如有:\n
      \n
    • ServiceTask: 执行调用服务任务;
    • \n
    • Choice: 单条件选择路由;
    • \n
    • CompensationTrigger: 触发补偿流程;
    • \n
    • Succeed: 状态机正常结束;
    • \n
    • Fail: 状态机异常结束;
    • \n
    • SubStateMachine: 调用子状态机;
    • \n
    \n
  • \n
  • ServiceName: 服务名称,通常是服务的beanId;
  • \n
  • ServiceMethod: 服务方法名称;
  • \n
  • CompensateState: 该"状态"的补偿"状态";
  • \n
  • Input: 调用服务的输入参数列表,是一个数组,对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达使用的 SpringEL[8], 如果是常量直接写值即可;
  • \n
  • Output: 将服务返回的参数赋值到状态机上下文中,是一个 map 结构,key 为放入到状态机上文时的 key(状态机上下文也是一个 map),value 中 $. 是表示 SpringEL 表达式,表示从服务的返回参数中取值,#root 表示服务的整个返回参数;
  • \n
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知,我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个 map 结构,key 是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是 SpringEL 表达式判断服务返回参数,带 $Exception{开头表示判断异常类型,value 是当这个条件表达式成立时则将服务执行状态映射成这个值;
  • \n
  • Catch: 捕获到异常后的路由;
  • \n
  • Next: 服务执行完成后下一个执行的"状态";
  • \n
  • Choices: Choice 类型的"状态"里, 可选的分支列表, 分支中的 Expression 为 SpringEL 表达式,Next 为当表达式成立时执行的下一个"状态";
  • \n
  • ErrorCode: Fail 类型"状态"的错误码;
  • \n
  • Message: Fail 类型"状态"的错误信息;
  • \n
\n

更多详细的状态语言解释请看《Seata Saga 官网文档》[6]。

\n

\n

状态机引擎原理:

\n

\"状态机引擎原理\"

\n
    \n
  • 图中的状态图是先执行 stateA, 再执行 stateB,然后执行 stateC;
  • \n
  • "状态"的执行是基于事件驱动的模型,stataA 执行完成后,会产生路由消息放入 EventQueue,事件消费端从 EventQueue 取出消息,执行 stateB;
  • \n
  • 在整个状态机启动时会调用 Seata Server 开启分布式事务,并生成 xid, 然后记录"状态机实例"启动事件到本地数据库;
  • \n
  • 当执行到一个"状态"时会调用 Seata Server 注册分支事务,并生产 branchId, 然后记录"状态实例"开始执行事件到本地数据库;
  • \n
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用 Seata Server 上报分支事务的状态;
  • \n
  • 当整个状态机执行完成,会记录"状态机实例"执行完成事件到本地数据库, 然后调用 Seata Server 提交或回滚分布式事务;
  • \n
\n

\n

状态机引擎设计:

\n

\"状态机引擎设计\"

\n

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

\n
    \n
  • \n

    Eventing 层:

    \n
      \n
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么消费端执行什么,由上层实现;
    • \n
    \n
  • \n
  • \n

    ProcessController 层:

    \n
      \n
    • 基于下层的 Eventing 层驱动一个“空”流程的执行,"state"的行为和路由都未实现,由上层实现;
    • \n
    \n
  • \n
\n
\n

基于以上两层理论上可以自定义扩展任何"流程"引擎。这两层的设计是参考了内部金融网络平台的设计。

\n
\n
    \n
  • StateMachineEngine 层:\n
      \n
    • 实现状态机引擎每种 state 的行为和路由逻辑;
    • \n
    • 提供 API、状态机语言仓库;
    • \n
    \n
  • \n
\n

\n

Saga 模式下服务设计的实践经验

\n

下面是实践中总结的在 Saga 模式下微服务设计的一些经验,当然这是推荐做法,并不是说一定要 100% 遵循,没有遵循也有“绕过”方案。

\n
\n

好消息:Seata Saga 模式对微服务的接口参数没有任何要求,这使得 Saga 模式可用于集成遗留系统或外部机构的服务。

\n
\n

\n

允许空补偿

\n
    \n
  • 空补偿:原服务未执行,补偿服务执行了;
  • \n
  • 出现原因:\n
      \n
    • 原服务 超时(丢包);
    • \n
    • Saga 事务触发 回滚;
    • \n
    • 未收到原服务请求,先收到补偿请求;
    • \n
    \n
  • \n
\n

所以服务设计时需要允许空补偿,即没有找到要补偿的业务主键时返回补偿成功并将原业务主键记录下来。

\n

\n

防悬挂控制

\n
    \n
  • 悬挂:补偿服务 比 原服务 先执行;
  • \n
  • 出现原因:\n
      \n
    • 原服务 超时(拥堵);
    • \n
    • Saga 事务触发回滚;
    • \n
    • 拥堵的原服务到达;
    • \n
    \n
  • \n
\n

所以要检查当前业务主键是否已经在空补偿记录下来的业务主键中存在,如果存在则要拒绝服务的执行。

\n

\n

幂等控制

\n
    \n
  • 原服务与补偿服务都需要保证幂等性, 由于网络可能超时,可以设置重试策略,重试发生时要通过幂等控制避免业务数据重复更新。
  • \n
\n

\n

总结

\n

很多时候我们并不需要强求强一致性,而是基于 BASE 和 Saga 理论去设计更有弹性的系统,在分布式架构下获得更好的性能和容错能力。分布式架构没有银弹,只有适合特定场景的方案,事实上 Seata Saga 是一个具备“服务编排”和“Saga 分布式事务”能力的产品,总结下来它的适用场景是:

\n
    \n
  • 适用于微服务架构下的“长事务”处理;
  • \n
  • 适用于微服务架构下的“服务编排”需求;
  • \n
  • 适用于金融核心系统以上的有大量组合服务的业务系统(比如在渠道层、产品层、集成层的系统);
  • \n
  • 适用于业务流程中需要集成遗留系统或外部机构提供的服务的场景(这些服务不可变不能对其提出改造要求)。
  • \n
\n

\n

文中涉及相关链接

\n

[1]https://github.com/aphyr/dist-sagas/blob/master/sagas.pdf
[2]http://microservices.io/patterns/data/saga.html
[3]Microprofile 的 LRAhttps://github.com/eclipse/microprofile-sandbox/tree/master/proposals/0009-LRA
[4]Eventuate Tram Sagahttps://github.com/eventuate-tram/eventuate-tram-sagas
[5]ServiceComb Sagahttps://github.com/apache/servicecomb-pack
[6]Seata Saga 官网文档http://seata.io/zh-cn/docs/user/saga.html
[7]AWS Step Functionshttps://docs.aws.amazon.com/zh_cn/step-functions/latest/dg/tutorial-creating-lambda-state-machine.html
[8]SpringELhttps://docs.spring.io/spring/docs/4.3.10.RELEASE/spring-framework-reference/html/expressions.html

\n", - "link": "/zh-cn/blog/design-more-flexable-application-by-saga.html", - "meta": { - "title": "基于 Seata Saga 设计更有弹性的金融应用", - "keywords": "Saga,Seata,一致性,金融,弹性,分布式,事务", - "description": "本文从金融分布式应用开发的一些痛点出发,结合理论和实践对社区和行业的解决方案进行了分析,并讲解了如何基于Seata saga设计更有弹性的金融应用", - "author": "long187", - "date": "2019-11-04" - } -} \ No newline at end of file diff --git a/zh-cn/blog/download.html b/zh-cn/blog/download.html deleted file mode 100644 index 1716be52..00000000 --- a/zh-cn/blog/download.html +++ /dev/null @@ -1,331 +0,0 @@ - - - - - - - - - - 下载中心 - - - - -

下载中心

-

Seata

-
-

GitHub: https://github.com/seata/seata
-发布说明: https://github.com/seata/seata/releases

-
-

0.9.0 (2019-10-16)

-

source | -binary

-
- Release notes -

Seata 0.9.0

-

Seata 0.9.0 正式发布。

-

Seata 是一款开源的分布式事务解决方案,提供高性能和简单易用的分布式事务服务。

-

此版本更新如下:

-

feature:

-
    -
  • [#1608] 长事务解决方案: Saga 模式(基于状态机实现)
  • -
  • [#1625] 支持自定义配置和注册中心类型
  • -
  • [#1656] 支持 spring cloud config 配置中心
  • -
  • [#1689] 支持 -e 启动参数,用于指定环境名称
  • -
  • [#1739] 支持 TM commit 或rollback 失败时的重试
  • -
-

bugfix:

-
    -
  • [#1605] 修复对象锁和全局锁可能造成的死锁和优化锁的粒度
  • -
  • [#1685] 修复db存储类异常被忽略的问题
  • -
  • [#1691] 修复 DruidDataSourceWrapper 反射问题
  • -
  • [#1699] 修复 mysql 和 oracle 中 'in' 和 'between' 在 where 条件的支持
  • -
  • [#1713] 修复 LockManagerTest.concurrentUseAbilityTest 中的测试条件
  • -
  • [#1720] 修复了不能获取 oracle tableMeta 问题
  • -
  • [#1729] 修复 oracle 的批量获取问题
  • -
  • [#1735] 修复当 TM commit 或 rollback 出现网络异常无法清除 xid 的问题
  • -
  • [#1749] 修复无法获取 oracle tableMeta cache 问题
  • -
  • [#1751] 修复文件存储模式下由于hash冲突导致的锁无法释放问题
  • -
  • [#1761] 修复 oracle 在回滚时 Blob 或 Clob null 值回滚失败问题
  • -
  • [#1759] 修复 saga 模式下 service method 不支持接口类型参数问题
  • -
  • [#1401] 修复 RM 启动时第一次注册 resource 为 null 的问题
  • -
-

optimize:

-
    -
  • [#1701] 移除无用的 imports
  • -
  • [#1705] 优化了一些基于 java5 的语法结构
  • -
  • [#1706] 将内部类声明为 static
  • -
  • [#1707] 使用 StandardCharsets.UTF_8 代替 utf-8 编码
  • -
  • [#1712] 抽象 undologManager 的通用方法
  • -
  • [#1722] 简化代码提高代码的可读性
  • -
  • [#1726] 格式化日志输出
  • -
  • [#1738] 增加 seata-server jvm 参数
  • -
  • [#1743] 提高批量打印日志的性能
  • -
  • [#1747] 使用基本类型避免数据装箱
  • -
  • [#1750] 抽象 tableMetaCache 方法
  • -
  • [#1755] 提高 seata-common 模块的单测覆盖率
  • -
  • [#1756] 升级 jackson 版本防止潜在的安全漏洞
  • -
  • [#1657] 优化文件存储模式下文件 rolling 时占用较大 direct buffer的问题
  • -
-

非常感谢以下 contributors 的代码贡献。若有无意遗漏,请报告。

- -

同时,我们收到了社区反馈的很多有价值的issue和建议,非常感谢大家。

-

常用链接

- -
-

0.8.1 (2019-09-18)

-

source | -binary

-
- Release notes -

Seata 0.8.1

-

Seata 0.8.1 正式发布。

-

Seata 是一款开源的分布式事务解决方案,提供高性能和简单易用的分布式事务服务。

-

此版本更新如下:

-

feature:

-
    -
  • [#1598] 支持配置文件使用绝对路径
  • -
  • [#1617] 支持配置文件名称(registry.conf) 可配置
  • -
  • [#1418] 支持 undo_log 数据的 kryo 序列化
  • -
  • [#1489] 支持 protobuf 生成插件
  • -
  • [#1437] 支持通信协议的 kryo 编解码
  • -
  • [#1478] 支持 db mock
  • -
  • [#1512] 扩展支持 mysql 和 oracle 的多种批量插入语法
  • -
  • [#1496] 支持 DataSource 的自动代理
  • -
-

bugfix:

-
    -
  • [#1646] 修复 file 存储模式的 selectForUpdate lockQuery exception
  • -
  • [#1572] 修复在oracle 小写表名时获取 tablemeta 失败问题
  • -
  • [#1663] 修复表名为关键字获取 tablemeta 失败问题
  • -
  • [#1666] 修复数据库连接使用后的 autocommit 问题
  • -
  • [#1643] 修复 java.sql.Blob, java.sql.Clob 类型的序列化
  • -
  • [#1628] 修复 oracle 支持 ROWNUM 查询
  • -
  • [#1552] 修复当分支太大时的 BufferOverflow 问题
  • -
  • [#1609] 修复 oracle 关键字的线程安全问题
  • -
  • [#1599] 修复 mysql 关键字的线程安全问题
  • -
  • [#1607] 修复当druid版本小于1.1.3时 NoSuchMethodError
  • -
  • [#1581] 修复文件存储模式下 GlobalSession 长度计算不准确问题
  • -
  • [#1594] 修复 nacos 配置中心的默认 namespace
  • -
  • [#1550] 修复计算 BranchSession 丢失 xidBytes 长度问题
  • -
  • [#1558] 修复 rpcMessage 的 body 字段 NPE问题
  • -
  • [#1505] 修复绑定公网注册地址server监听失败问题
  • -
  • [#1539] 修复 nacos namespace 配置项不生效
  • -
  • [#1537] 修复 nacos-config.txt 缺失 store.db.driver-class-name 配置项
  • -
  • [#1522] 修复 ProtocolV1CodecTest 中 testAll 运行中可能出现测试失败问题
  • -
  • [#1525] 修复当 getAfterImage 获取失败时,事务自动被提交问题
  • -
  • [#1518] 修复 EnhancedServiceLoader SPI 顺序加载第三方依赖失败问题
  • -
  • [#1514] 修复当缺少序列化依赖无法生成undolog并report true问题
  • -
  • [#1445] 修复 DefaultCoordinatorMetricsTest 单测失败问题
  • -
  • [#1481] 修复 TableMetaCache 在多数据源刷新失败问题
  • -
-

optimize:

-
    -
  • [#1629] 优化etcd3中watcher订阅的效率
  • -
  • [#1661] 优化 global_table 中 transaction_name 长度问题
  • -
  • [#1633] 优化分支事务获取全局锁失败重复report(false)问题
  • -
  • [#1654] 优化 slf4j 的错误使用
  • -
  • [#1593] 优化和规范化 server 的日志
  • -
  • [#1648] 优化 transaction_name 在建表时的长度
  • -
  • [#1576] 消除重排序对 session 异步提交的影响
  • -
  • [#1618] 优化 undolog manager 和 修复oracle undolog 的删除
  • -
  • [#1469] 提供不释放数据库锁情况下等待全局锁的释放以减少锁冲突
  • -
  • [#1619] 使用 StringBuffer 代替 StringBuilder
  • -
  • [#1580] 优化 LockKeyConflictException 和更改 register 方法
  • -
  • [#1574] 优化db存储模式下globalCommit 一次性删除全局锁
  • -
  • [#1601] 优化 typo
  • -
  • [#1602] 升级 fastjson 版本至 1.2.60 应对安全漏洞
  • -
  • [#1583] 优化 oracle 主键的获取
  • -
  • [#1575] 增加 RegisterTMRequest 的单元测试
  • -
  • [#1559] 启动时延迟删除过期 undo_log
  • -
  • [#1547] 删除 TableRecords 的 jackson 注解
  • -
  • [#1542] 优化 AbstractSessionManager 日志
  • -
  • [#1535] 去除 H2 和 pgsql 获取主键代码,修复 resultset 关闭问题
  • -
  • [#1541] 代码清理
  • -
  • [#1544] 去除中文注释
  • -
  • [#1533] 重构多环境配置的代码逻辑
  • -
  • [#1493] 增加 tableMeta 检测任务开关
  • -
  • [#1530] 优化当数据表无索引时抛出显式异常
  • -
  • [#1444] 简化map操作
  • -
  • [#1497] 增加 seata-all 依赖
  • -
  • [#1490] 移除不必要代码
  • -
-

非常感谢以下 contributors 的代码贡献。若有无意遗漏,请报告。

- -

同时,我们收到了社区反馈的很多有价值的issue和建议,非常感谢大家。

-

常用链接

- -
-

0.8.0 (2019-08-16)

- -

0.7.1 (2019-07-15)

- -

0.7.0 (2019-07-12)

- -

0.6.1 (2019-05-31)

- -

0.6.0 (2019-05-24)

- -

0.5.2 (2019-05-17)

- -

0.5.1 (2019-04-30)

- -

0.5.0 (2019-04-19)

- -

0.4.2 (2019-04-12)

- -

0.4.1 (2019-03-29)

- -

0.4.0 (2019-03-19)

- -

0.3.1 (2019-03-15)

- -

0.3.0 (2019-03-08)

- -

0.2.3 (2019-03-02)

- -

0.2.2 (2019-02-22)

- -

0.2.1 (2019-02-18)

- -

0.2.0 (2019-02-14)

- -

0.1.4 (2019-02-11)

- -

0.1.3 (2019-01-29)

- -

0.1.2 (2019-01-25)

- -

0.1.1 (2019-01-18)

- -

0.1.0 (2019-01-09)

- -
- - - - - - - diff --git a/zh-cn/blog/download.json b/zh-cn/blog/download.json deleted file mode 100644 index f6de6be6..00000000 --- a/zh-cn/blog/download.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "download.md", - "__html": "

下载中心

\n

Seata

\n
\n

GitHub: https://github.com/seata/seata
\n发布说明: https://github.com/seata/seata/releases

\n
\n

0.9.0 (2019-10-16)

\n

source |\nbinary

\n
\n Release notes\n

Seata 0.9.0

\n

Seata 0.9.0 正式发布。

\n

Seata 是一款开源的分布式事务解决方案,提供高性能和简单易用的分布式事务服务。

\n

此版本更新如下:

\n

feature:

\n
    \n
  • [#1608] 长事务解决方案: Saga 模式(基于状态机实现)
  • \n
  • [#1625] 支持自定义配置和注册中心类型
  • \n
  • [#1656] 支持 spring cloud config 配置中心
  • \n
  • [#1689] 支持 -e 启动参数,用于指定环境名称
  • \n
  • [#1739] 支持 TM commit 或rollback 失败时的重试
  • \n
\n

bugfix:

\n
    \n
  • [#1605] 修复对象锁和全局锁可能造成的死锁和优化锁的粒度
  • \n
  • [#1685] 修复db存储类异常被忽略的问题
  • \n
  • [#1691] 修复 DruidDataSourceWrapper 反射问题
  • \n
  • [#1699] 修复 mysql 和 oracle 中 'in' 和 'between' 在 where 条件的支持
  • \n
  • [#1713] 修复 LockManagerTest.concurrentUseAbilityTest 中的测试条件
  • \n
  • [#1720] 修复了不能获取 oracle tableMeta 问题
  • \n
  • [#1729] 修复 oracle 的批量获取问题
  • \n
  • [#1735] 修复当 TM commit 或 rollback 出现网络异常无法清除 xid 的问题
  • \n
  • [#1749] 修复无法获取 oracle tableMeta cache 问题
  • \n
  • [#1751] 修复文件存储模式下由于hash冲突导致的锁无法释放问题
  • \n
  • [#1761] 修复 oracle 在回滚时 Blob 或 Clob null 值回滚失败问题
  • \n
  • [#1759] 修复 saga 模式下 service method 不支持接口类型参数问题
  • \n
  • [#1401] 修复 RM 启动时第一次注册 resource 为 null 的问题
  • \n
\n

optimize:

\n
    \n
  • [#1701] 移除无用的 imports
  • \n
  • [#1705] 优化了一些基于 java5 的语法结构
  • \n
  • [#1706] 将内部类声明为 static
  • \n
  • [#1707] 使用 StandardCharsets.UTF_8 代替 utf-8 编码
  • \n
  • [#1712] 抽象 undologManager 的通用方法
  • \n
  • [#1722] 简化代码提高代码的可读性
  • \n
  • [#1726] 格式化日志输出
  • \n
  • [#1738] 增加 seata-server jvm 参数
  • \n
  • [#1743] 提高批量打印日志的性能
  • \n
  • [#1747] 使用基本类型避免数据装箱
  • \n
  • [#1750] 抽象 tableMetaCache 方法
  • \n
  • [#1755] 提高 seata-common 模块的单测覆盖率
  • \n
  • [#1756] 升级 jackson 版本防止潜在的安全漏洞
  • \n
  • [#1657] 优化文件存储模式下文件 rolling 时占用较大 direct buffer的问题
  • \n
\n

非常感谢以下 contributors 的代码贡献。若有无意遗漏,请报告。

\n\n

同时,我们收到了社区反馈的很多有价值的issue和建议,非常感谢大家。

\n

常用链接

\n\n
\n

0.8.1 (2019-09-18)

\n

source |\nbinary

\n
\n Release notes\n

Seata 0.8.1

\n

Seata 0.8.1 正式发布。

\n

Seata 是一款开源的分布式事务解决方案,提供高性能和简单易用的分布式事务服务。

\n

此版本更新如下:

\n

feature:

\n
    \n
  • [#1598] 支持配置文件使用绝对路径
  • \n
  • [#1617] 支持配置文件名称(registry.conf) 可配置
  • \n
  • [#1418] 支持 undo_log 数据的 kryo 序列化
  • \n
  • [#1489] 支持 protobuf 生成插件
  • \n
  • [#1437] 支持通信协议的 kryo 编解码
  • \n
  • [#1478] 支持 db mock
  • \n
  • [#1512] 扩展支持 mysql 和 oracle 的多种批量插入语法
  • \n
  • [#1496] 支持 DataSource 的自动代理
  • \n
\n

bugfix:

\n
    \n
  • [#1646] 修复 file 存储模式的 selectForUpdate lockQuery exception
  • \n
  • [#1572] 修复在oracle 小写表名时获取 tablemeta 失败问题
  • \n
  • [#1663] 修复表名为关键字获取 tablemeta 失败问题
  • \n
  • [#1666] 修复数据库连接使用后的 autocommit 问题
  • \n
  • [#1643] 修复 java.sql.Blob, java.sql.Clob 类型的序列化
  • \n
  • [#1628] 修复 oracle 支持 ROWNUM 查询
  • \n
  • [#1552] 修复当分支太大时的 BufferOverflow 问题
  • \n
  • [#1609] 修复 oracle 关键字的线程安全问题
  • \n
  • [#1599] 修复 mysql 关键字的线程安全问题
  • \n
  • [#1607] 修复当druid版本小于1.1.3时 NoSuchMethodError
  • \n
  • [#1581] 修复文件存储模式下 GlobalSession 长度计算不准确问题
  • \n
  • [#1594] 修复 nacos 配置中心的默认 namespace
  • \n
  • [#1550] 修复计算 BranchSession 丢失 xidBytes 长度问题
  • \n
  • [#1558] 修复 rpcMessage 的 body 字段 NPE问题
  • \n
  • [#1505] 修复绑定公网注册地址server监听失败问题
  • \n
  • [#1539] 修复 nacos namespace 配置项不生效
  • \n
  • [#1537] 修复 nacos-config.txt 缺失 store.db.driver-class-name 配置项
  • \n
  • [#1522] 修复 ProtocolV1CodecTest 中 testAll 运行中可能出现测试失败问题
  • \n
  • [#1525] 修复当 getAfterImage 获取失败时,事务自动被提交问题
  • \n
  • [#1518] 修复 EnhancedServiceLoader SPI 顺序加载第三方依赖失败问题
  • \n
  • [#1514] 修复当缺少序列化依赖无法生成undolog并report true问题
  • \n
  • [#1445] 修复 DefaultCoordinatorMetricsTest 单测失败问题
  • \n
  • [#1481] 修复 TableMetaCache 在多数据源刷新失败问题
  • \n
\n

optimize:

\n
    \n
  • [#1629] 优化etcd3中watcher订阅的效率
  • \n
  • [#1661] 优化 global_table 中 transaction_name 长度问题
  • \n
  • [#1633] 优化分支事务获取全局锁失败重复report(false)问题
  • \n
  • [#1654] 优化 slf4j 的错误使用
  • \n
  • [#1593] 优化和规范化 server 的日志
  • \n
  • [#1648] 优化 transaction_name 在建表时的长度
  • \n
  • [#1576] 消除重排序对 session 异步提交的影响
  • \n
  • [#1618] 优化 undolog manager 和 修复oracle undolog 的删除
  • \n
  • [#1469] 提供不释放数据库锁情况下等待全局锁的释放以减少锁冲突
  • \n
  • [#1619] 使用 StringBuffer 代替 StringBuilder
  • \n
  • [#1580] 优化 LockKeyConflictException 和更改 register 方法
  • \n
  • [#1574] 优化db存储模式下globalCommit 一次性删除全局锁
  • \n
  • [#1601] 优化 typo
  • \n
  • [#1602] 升级 fastjson 版本至 1.2.60 应对安全漏洞
  • \n
  • [#1583] 优化 oracle 主键的获取
  • \n
  • [#1575] 增加 RegisterTMRequest 的单元测试
  • \n
  • [#1559] 启动时延迟删除过期 undo_log
  • \n
  • [#1547] 删除 TableRecords 的 jackson 注解
  • \n
  • [#1542] 优化 AbstractSessionManager 日志
  • \n
  • [#1535] 去除 H2 和 pgsql 获取主键代码,修复 resultset 关闭问题
  • \n
  • [#1541] 代码清理
  • \n
  • [#1544] 去除中文注释
  • \n
  • [#1533] 重构多环境配置的代码逻辑
  • \n
  • [#1493] 增加 tableMeta 检测任务开关
  • \n
  • [#1530] 优化当数据表无索引时抛出显式异常
  • \n
  • [#1444] 简化map操作
  • \n
  • [#1497] 增加 seata-all 依赖
  • \n
  • [#1490] 移除不必要代码
  • \n
\n

非常感谢以下 contributors 的代码贡献。若有无意遗漏,请报告。

\n\n

同时,我们收到了社区反馈的很多有价值的issue和建议,非常感谢大家。

\n

常用链接

\n\n
\n

0.8.0 (2019-08-16)

\n\n

0.7.1 (2019-07-15)

\n\n

0.7.0 (2019-07-12)

\n\n

0.6.1 (2019-05-31)

\n\n

0.6.0 (2019-05-24)

\n\n

0.5.2 (2019-05-17)

\n\n

0.5.1 (2019-04-30)

\n\n

0.5.0 (2019-04-19)

\n\n

0.4.2 (2019-04-12)

\n\n

0.4.1 (2019-03-29)

\n\n

0.4.0 (2019-03-19)

\n\n

0.3.1 (2019-03-15)

\n\n

0.3.0 (2019-03-08)

\n\n

0.2.3 (2019-03-02)

\n\n

0.2.2 (2019-02-22)

\n\n

0.2.1 (2019-02-18)

\n\n

0.2.0 (2019-02-14)

\n\n

0.1.4 (2019-02-11)

\n\n

0.1.3 (2019-01-29)

\n\n

0.1.2 (2019-01-25)

\n\n

0.1.1 (2019-01-18)

\n\n

0.1.0 (2019-01-09)

\n\n", - "link": "/zh-cn/blog/download.html", - "meta": { - "title": "下载中心", - "keywords": "Seata, Downloads, Version", - "description": "本文将向你介绍如何点击了解各版本详情和升级注意事项。" - } -} \ No newline at end of file diff --git a/zh-cn/blog/dubbo-seata.html b/zh-cn/blog/dubbo-seata.html deleted file mode 100644 index 23bc17c8..00000000 --- a/zh-cn/blog/dubbo-seata.html +++ /dev/null @@ -1,205 +0,0 @@ - - - - - - - - - - 如何使用Seata保证Dubbo微服务间的一致性 - - - - -

如何使用Seata保证Dubbo微服务间的一致性

-

案例

-

用户采购商品业务,整个业务包含3个微服务:

-
    -
  • 库存服务: 扣减给定商品的库存数量。
  • -
  • 订单服务: 根据采购请求生成订单。
  • -
  • 账户服务: 用户账户金额扣减。
  • -
-

业务结构图

-

Architecture

-

StorageService

-
public interface StorageService {
-
-    /**
-     * deduct storage count
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

OrderService

-
public interface OrderService {
-
-    /**
-     * create order
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

AccountService

-
public interface AccountService {
-
-    /**
-     * debit balance of user's account
-     */
-    void debit(String userId, int money);
-}
-
-

主要的业务逻辑:

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * purchase
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class StorageServiceImpl implements StorageService {
-
-  private StorageDAO storageDAO;
-  
-    @Override
-    public void deduct(String commodityCode, int count) {
-        Storage storage = new Storage();
-        storage.setCount(count);
-        storage.setCommodityCode(commodityCode);
-        storageDAO.update(storage);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        return orderDAO.insert(order);
-    }
-}
-
-

Seata 分布式事务解决方案

-

undefined

-

此处仅仅需要一行注解 @GlobalTransactional 写在业务发起方的方法上:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-

Dubbo 与 Seata 结合的例子

-

Step 1: 安装数据库

-
    -
  • 要求: MySQL (InnoDB 存储引擎)。
  • -
-

提示: 事实上例子中3个微服务需要3个独立的数据库,但为了方便我们使用同一物理库并配置3个逻辑连接串。

-

更改以下xml文件中的数据库url、username和password

-

dubbo-account-service.xml -dubbo-order-service.xml -dubbo-storage-service.xml

-
    <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-    <property name="username" value="xxx" />
-    <property name="password" value="xxx" />
-
-

Step 2: 为 Seata 创建 UNDO_LOG 表

-

UNDO_LOG 此表用于 Seata 的AT模式。

-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  KEY `idx_unionkey` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8
-
-

Step 3: 创建相关业务表

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

Step 4: 启动 Seata-Server 服务

-
    -
  • 下载Server package, 并解压。
  • -
  • 运行bin目录下的启动脚本。
  • -
-
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
-
-e.g.
-
-sh seata-server.sh 8091 /home/admin/seata/data/
-
-

Step 5: 运行例子

- -

相关项目

- -
- - - - - - diff --git a/zh-cn/blog/dubbo-seata.json b/zh-cn/blog/dubbo-seata.json deleted file mode 100644 index aed94666..00000000 --- a/zh-cn/blog/dubbo-seata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "dubbo-seata.md", - "__html": "

如何使用Seata保证Dubbo微服务间的一致性

\n

案例

\n

用户采购商品业务,整个业务包含3个微服务:

\n
    \n
  • 库存服务: 扣减给定商品的库存数量。
  • \n
  • 订单服务: 根据采购请求生成订单。
  • \n
  • 账户服务: 用户账户金额扣减。
  • \n
\n

业务结构图

\n

\"Architecture\"

\n

StorageService

\n
public interface StorageService {\n\n    /**\n     * deduct storage count\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

OrderService

\n
public interface OrderService {\n\n    /**\n     * create order\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

AccountService

\n
public interface AccountService {\n\n    /**\n     * debit balance of user's account\n     */\n    void debit(String userId, int money);\n}\n
\n

主要的业务逻辑:

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * purchase\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class StorageServiceImpl implements StorageService {\n\n  private StorageDAO storageDAO;\n  \n    @Override\n    public void deduct(String commodityCode, int count) {\n        Storage storage = new Storage();\n        storage.setCount(count);\n        storage.setCommodityCode(commodityCode);\n        storageDAO.update(storage);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        return orderDAO.insert(order);\n    }\n}\n
\n

Seata 分布式事务解决方案

\n

\"undefined\"

\n

此处仅仅需要一行注解 @GlobalTransactional 写在业务发起方的方法上:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

Dubbo 与 Seata 结合的例子

\n

Step 1: 安装数据库

\n
    \n
  • 要求: MySQL (InnoDB 存储引擎)。
  • \n
\n

提示: 事实上例子中3个微服务需要3个独立的数据库,但为了方便我们使用同一物理库并配置3个逻辑连接串。

\n

更改以下xml文件中的数据库url、username和password

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
    <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n    <property name=\"username\" value=\"xxx\" />\n    <property name=\"password\" value=\"xxx\" />\n
\n

Step 2: 为 Seata 创建 UNDO_LOG 表

\n

UNDO_LOG 此表用于 Seata 的AT模式。

\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  KEY `idx_unionkey` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8\n
\n

Step 3: 创建相关业务表

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

Step 4: 启动 Seata-Server 服务

\n
    \n
  • 下载Server package, 并解压。
  • \n
  • 运行bin目录下的启动脚本。
  • \n
\n
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA\n\ne.g.\n\nsh seata-server.sh 8091 /home/admin/seata/data/\n
\n

Step 5: 运行例子

\n\n

相关项目

\n\n", - "link": "/zh-cn/blog/dubbo-seata.html", - "meta": { - "title": "如何使用Seata保证Dubbo微服务间的一致性", - "keywords": "Dubbo,Seata,一致性", - "description": "本文主要介绍如何使用Seata保证Dubbo微服务间的一致性", - "author": "slievrly", - "date": "2019-03-07" - } -} \ No newline at end of file diff --git a/zh-cn/blog/how-to-support-spring-cloud.html b/zh-cn/blog/how-to-support-spring-cloud.html deleted file mode 100644 index d75ae505..00000000 --- a/zh-cn/blog/how-to-support-spring-cloud.html +++ /dev/null @@ -1,489 +0,0 @@ - - - - - - - - - - Fescar 与 Spring Cloud 集成源码深度剖析 - - - - -

Fescar 与 Spring Cloud 集成源码深度剖析

-

Fescar 简介

-

常见的分布式事务方式有基于 2PC 的 XA (e.g. atomikos),从业务层入手的 TCC( e.g. byteTCC)、事务消息 ( e.g. RocketMQ Half Message) 等等。XA 是需要本地数据库支持的分布式事务的协议,资源锁在数据库层面导致性能较差,而支付宝作为布道师引入的 TCC 模式需要大量的业务代码保证,开发维护成本较高。

-

分布式事务是业界比较关注的领域,这也是短短时间内 Fescar 能收获 6k Star 的原因之一。Fescar 名字取自 Fast & Easy Commit And Rollback,简单来说 Fescar 通过对本地 RDBMS 分支事务的协调来驱动完成全局事务,是工作在应用层的中间件。主要优点是:相对于 XA 模式性能较好,不会长时间占用连接资源;相对于 TCC 方式开发成本和业务侵入性较低。

-

类似于 XA,Fescar 将角色分为 TC、RM、TM,事务整体过程模型如下:

-

Fescar事务过程

-
1. TM 向 TC 申请开启一个全局事务,全局事务创建成功并生成一个全局唯一的 XID。
-2. XID 在微服务调用链路的上下文中传播。
-3. RM 向 TC 注册分支事务,将其纳入 XID 对应全局事务的管辖。
-4. TM 向 TC 发起针对 XID 的全局提交或回滚决议。
-5. TC 调度 XID 下管辖的全部分支事务完成提交或回滚请求。
-
-

其中在目前的实现版本中 TC 是独立部署的进程,维护全局事务的操作记录和全局锁记录,负责协调并驱动全局事务的提交或回滚。TM 和 RM 则与应用程序工作在同一应用进程中。RM 对 JDBC 数据源采用代理的方式对底层数据库做管理,利用语法解析,在执行事务时保留快照,并生成 undo log。大概的流程和模型划分就介绍到这里,下面开始分析 Fescar 的事务传播机制。

-

Fescar 事务传播机制

-

Fescar 事务传播包括应用内事务嵌套调用和跨服务调用的事务传播。Fescar 事务是怎么在微服务调用链中传播的呢?Fescar 提供了事务 API 允许用户手动绑定事务的 XID 并加入到全局事务中,所以我们根据不同的服务框架机制,将 XID 在链路中传递即可实现事务的传播。

-

RPC 请求过程分为调用方与被调用方两部分,我们需要对 XID 在请求与响应时做相应的处理。大致过程为:调用方即请求方将当前事务上下文中的 XID 取出,通过RPC协议传递给被调用方;被调用方从请求中的将 XID 取出,并绑定到自己的事务上下文中,纳入全局事务。微服务框架一般都有相应的 Filter 和 Interceptor 机制,我们来具体分析下 Spring Cloud 与Fescar 的整合过程。

-

Fescar 与 Spring Cloud Alibaba 集成部分源码解析

-

本部分源码全部来自于 spring-cloud-alibaba-fescar. 源码解析部分主要包括 AutoConfiguration、微服务被调用方和微服务调用方三大部分。对于微服务调用方方式具体分为 RestTemplate 和 Feign,其中对于 Feign 请求方式又进一步细分为结合 Hystrix 和 Sentinel 的使用模式。

-

Fescar AutoConfiguration

-

对于 AutoConfiguration 的解析此处只介绍与 Fescar 启动相关的部分,其他部分的解析将穿插于【微服务被调用方】和 【微服务调用方】章节进行介绍。

-

Fescar 的启动需要配置 GlobalTransactionScanner,GlobalTransactionScanner 负责初始化 Fescar 的 RM client、TM client 和 自动代理标注 GlobalTransactional 注解的类。GlobalTransactionScanner bean 的启动通过 GlobalTransactionAutoConfiguration 加载并注入FescarProperties。
-FescarProperties 包含了 Fescar 的重要属性 txServiceGroup ,此属性的可通过 application.properties 文件中的 key: spring.cloud.alibaba.fescar.txServiceGroup 读取,默认值为 ${spring.application.name}-fescar-service-group 。txServiceGroup 表示 Fescar 的逻辑事务分组名,此分组名通过配置中心(目前支持文件、Apollo)获取逻辑事务分组名对应的 TC 集群名称,进一步通过集群名称构造出 TC 集群的服务名,通过注册中心(目前支持nacos、redis、zk和eureka)和服务名找到可用的 TC 服务节点,然后 RM client、TM client 与 TC 进行 rpc 交互。

-

微服务被调用方

-

由于调用方的逻辑比较多一点,我们先分析被调用方的逻辑。针对于 Spring Cloud 项目,默认采用的 RPC 传输协议是 HTTP 协议,所以使用了 HandlerInterceptor 机制来对HTTP的请求做拦截。

-

HandlerInterceptor 是 Spring 提供的接口, 它有以下三个方法可以被覆写。

-
    /**
-	 * Intercept the execution of a handler. Called after HandlerMapping determined
-	 * an appropriate handler object, but before HandlerAdapter invokes the handler.
-	 */
-	default boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)
-			throws Exception {
-
-		return true;
-	}
-
-	/**
-	 * Intercept the execution of a handler. Called after HandlerAdapter actually
-	 * invoked the handler, but before the DispatcherServlet renders the view.
-	 * Can expose additional model objects to the view via the given ModelAndView.
-	 */
-	default void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler,
-			@Nullable ModelAndView modelAndView) throws Exception {
-	}
-
-	/**
-	 * Callback after completion of request processing, that is, after rendering
-	 * the view. Will be called on any outcome of handler execution, thus allows
-	 * for proper resource cleanup.
-	 */
-	default void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler,
-			@Nullable Exception ex) throws Exception {
-	}
-
-

根据注释,我们可以很明确的看到各个方法的作用时间和常用用途。对于 Fescar 集成来讲,它根据需要重写了 preHandle、afterCompletion 方法。

-

FescarHandlerInterceptor 的作用是将服务链路传递过来的 XID,绑定到服务节点的事务上下文中,并且在请求完成后清理相关资源。FescarHandlerInterceptorConfiguration 中配置了所有的 url 均进行拦截,对所有的请求过来均会执行该拦截器,进行 XID 的转换与事务绑定。

-
/**
- * @author xiaojing
- *
- * Fescar HandlerInterceptor, Convert Fescar information into
- * @see com.alibaba.fescar.core.context.RootContext from http request's header in
- * {@link org.springframework.web.servlet.HandlerInterceptor#preHandle(HttpServletRequest , HttpServletResponse , Object )},
- * And clean up Fescar information after servlet method invocation in
- * {@link org.springframework.web.servlet.HandlerInterceptor#afterCompletion(HttpServletRequest, HttpServletResponse, Object, Exception)}
- */
-public class FescarHandlerInterceptor implements HandlerInterceptor {
-
-	private static final Logger log = LoggerFactory
-			.getLogger(FescarHandlerInterceptor.class);
-
-	@Override
-	public boolean preHandle(HttpServletRequest request, HttpServletResponse response,
-			Object handler) throws Exception {
-
-		String xid = RootContext.getXID();
-		String rpcXid = request.getHeader(RootContext.KEY_XID);
-		if (log.isDebugEnabled()) {
-			log.debug("xid in RootContext {} xid in RpcContext {}", xid, rpcXid);
-		}
-
-		if (xid == null && rpcXid != null) {
-			RootContext.bind(rpcXid);
-			if (log.isDebugEnabled()) {
-				log.debug("bind {} to RootContext", rpcXid);
-			}
-		}
-		return true;
-	}
-
-	@Override
-	public void afterCompletion(HttpServletRequest request, HttpServletResponse response,
-			Object handler, Exception e) throws Exception {
-
-		String rpcXid = request.getHeader(RootContext.KEY_XID);
-
-		if (StringUtils.isEmpty(rpcXid)) {
-			return;
-		}
-
-		String unbindXid = RootContext.unbind();
-		if (log.isDebugEnabled()) {
-			log.debug("unbind {} from RootContext", unbindXid);
-		}
-		if (!rpcXid.equalsIgnoreCase(unbindXid)) {
-			log.warn("xid in change during RPC from {} to {}", rpcXid, unbindXid);
-			if (unbindXid != null) {
-				RootContext.bind(unbindXid);
-				log.warn("bind {} back to RootContext", unbindXid);
-			}
-		}
-	}
-
-}
-
-
-

preHandle 在请求执行前被调用,xid 为当前事务上下文已经绑定的全局事务的唯一标识,rpcXid 为请求通过 HTTP Header 传递过来需要绑定的全局事务标识。preHandle 方法中判断如果当前事务上下文中没有 XID,且 rpcXid 不为空,那么就将 rpcXid 绑定到当前的事务上下文。

-

afterCompletion 在请求完成后被调用,该方法用来执行资源的相关清理动作。Fescar 通过 RootContext.unbind() 方法对事务上下文涉及到的 XID 进行解绑。下面 if 中的逻辑是为了代码的健壮性考虑,如果遇到 rpcXid和 unbindXid 不相等的情况,再将 unbindXid 重新绑定回去。

-

对于 Spring Cloud 来讲,默认采用的 RPC 方式是 HTTP,所以对被调用方来讲,它的请求拦截方式不用做任何区分,只需要从 Header 中将 XID 取出并绑定到自己的事务上下文中即可。但是对于调用方,由于请求组件的多样化,包括熔断隔离机制,所以要区分不同的情况做处理,后面我们来具体分析一下。

-

微服务调用方

-

Fescar 将请求方式分为:RestTemplate、Feign、Feign+Hystrix 和 Feign+Sentinel 。不同的组件通过 Spring Boot 的 Auto Configuration 来完成自动的配置,具体的配置类清单可以看 spring.factories ,下文也会介绍相关的配置类。

-
RestTemplate
-

先来看下如果调用方如果是是基于 RestTemplate 的请求,Fescar 是怎么传递 XID 的。

-
public class FescarRestTemplateInterceptor implements ClientHttpRequestInterceptor {
-	@Override
-	public ClientHttpResponse intercept(HttpRequest httpRequest, byte[] bytes,
-			ClientHttpRequestExecution clientHttpRequestExecution) throws IOException {
-		HttpRequestWrapper requestWrapper = new HttpRequestWrapper(httpRequest);
-
-		String xid = RootContext.getXID();
-
-		if (!StringUtils.isEmpty(xid)) {
-			requestWrapper.getHeaders().add(RootContext.KEY_XID, xid);
-		}
-		return clientHttpRequestExecution.execute(requestWrapper, bytes);
-	}
-}
-
-

FescarRestTemplateInterceptor 实现了 ClientHttpRequestInterceptor 接口的 intercept 方法,对调用的请求做了包装,在发送请求时若存在 Fescar 事务上下文 XID 则取出并放到 HTTP Header 中。

-

FescarRestTemplateInterceptor 通过 FescarRestTemplateAutoConfiguration 实现将 FescarRestTemplateInterceptor 配置到 RestTemplate 中去。

-
@Configuration
-public class FescarRestTemplateAutoConfiguration {
-
-	@Bean
-	public FescarRestTemplateInterceptor fescarRestTemplateInterceptor() {
-		return new FescarRestTemplateInterceptor();
-	}
-
-	@Autowired(required = false)
-	private Collection<RestTemplate> restTemplates;
-
-	@Autowired
-	private FescarRestTemplateInterceptor fescarRestTemplateInterceptor;
-
-	@PostConstruct
-	public void init() {
-		if (this.restTemplates != null) {
-			for (RestTemplate restTemplate : restTemplates) {
-				List<ClientHttpRequestInterceptor> interceptors = new ArrayList<ClientHttpRequestInterceptor>(
-						restTemplate.getInterceptors());
-				interceptors.add(this.fescarRestTemplateInterceptor);
-				restTemplate.setInterceptors(interceptors);
-			}
-		}
-	}
-
-}
-
-

init 方法遍历所有的 restTemplate ,并将原来 restTemplate 中的拦截器取出,增加 fescarRestTemplateInterceptor 后置入并重排序。

-
Feign
-

Feign 类关系图

-

接下来看下 Feign 的相关代码,该包下面的类还是比较多的,我们先从其 AutoConfiguration 入手。

-
@Configuration
-@ConditionalOnClass(Client.class)
-@AutoConfigureBefore(FeignAutoConfiguration.class)
-public class FescarFeignClientAutoConfiguration {
-
-	@Bean
-	@Scope("prototype")
-	@ConditionalOnClass(name = "com.netflix.hystrix.HystrixCommand")
-	@ConditionalOnProperty(name = "feign.hystrix.enabled", havingValue = "true")
-	Feign.Builder feignHystrixBuilder(BeanFactory beanFactory) {
-		return FescarHystrixFeignBuilder.builder(beanFactory);
-	}
-
-	@Bean
-	@Scope("prototype")
-	@ConditionalOnClass(name = "com.alibaba.csp.sentinel.SphU")
-	@ConditionalOnProperty(name = "feign.sentinel.enabled", havingValue = "true")
-	Feign.Builder feignSentinelBuilder(BeanFactory beanFactory) {
-		return FescarSentinelFeignBuilder.builder(beanFactory);
-	}
-
-	@Bean
-	@ConditionalOnMissingBean
-	@Scope("prototype")
-	Feign.Builder feignBuilder(BeanFactory beanFactory) {
-		return FescarFeignBuilder.builder(beanFactory);
-	}
-
-	@Configuration
-	protected static class FeignBeanPostProcessorConfiguration {
-
-		@Bean
-		FescarBeanPostProcessor fescarBeanPostProcessor(
-				FescarFeignObjectWrapper fescarFeignObjectWrapper) {
-			return new FescarBeanPostProcessor(fescarFeignObjectWrapper);
-		}
-
-		@Bean
-		FescarContextBeanPostProcessor fescarContextBeanPostProcessor(
-				BeanFactory beanFactory) {
-			return new FescarContextBeanPostProcessor(beanFactory);
-		}
-
-		@Bean
-		FescarFeignObjectWrapper fescarFeignObjectWrapper(BeanFactory beanFactory) {
-			return new FescarFeignObjectWrapper(beanFactory);
-		}
-	}
-
-}
-
-

FescarFeignClientAutoConfiguration 在存在 Client.class 时生效,且要求作用在 FeignAutoConfiguration 之前。由于 FeignClientsConfiguration 是在 FeignAutoConfiguration 生成 FeignContext 时生效的,所以根据依赖关系,FescarFeignClientAutoConfiguration 同样早于 FeignClientsConfiguration 生效。

-

FescarFeignClientAutoConfiguration 自定义了 Feign.Builder,针对于 feign.sentinel,feign.hystrix 和 feign 的情况做了适配,目的是自定义 feign 中 Client 的真正实现为 FescarFeignClient。

-
HystrixFeign.builder().retryer(Retryer.NEVER_RETRY)
-      .client(new FescarFeignClient(beanFactory))
-
-
SentinelFeign.builder().retryer(Retryer.NEVER_RETRY)
-				.client(new FescarFeignClient(beanFactory));
-
-
Feign.builder().client(new FescarFeignClient(beanFactory));
-
-

FescarFeignClient 是对原来的 Feign 客户端代理增强,具体代码见下图:

-
public class FescarFeignClient implements Client {
-
-	private final Client delegate;
-	private final BeanFactory beanFactory;
-
-	FescarFeignClient(BeanFactory beanFactory) {
-		this.beanFactory = beanFactory;
-		this.delegate = new Client.Default(null, null);
-	}
-
-	FescarFeignClient(BeanFactory beanFactory, Client delegate) {
-		this.delegate = delegate;
-		this.beanFactory = beanFactory;
-	}
-
-	@Override
-	public Response execute(Request request, Request.Options options) throws IOException {
-
-		Request modifiedRequest = getModifyRequest(request);
-
-		try {
-			return this.delegate.execute(modifiedRequest, options);
-		}
-		finally {
-
-		}
-	}
-
-	private Request getModifyRequest(Request request) {
-
-		String xid = RootContext.getXID();
-
-		if (StringUtils.isEmpty(xid)) {
-			return request;
-		}
-
-		Map<String, Collection<String>> headers = new HashMap<>();
-		headers.putAll(request.headers());
-
-		List<String> fescarXid = new ArrayList<>();
-		fescarXid.add(xid);
-		headers.put(RootContext.KEY_XID, fescarXid);
-
-		return Request.create(request.method(), request.url(), headers, request.body(),
-				request.charset());
-	}
-
-
-

上面的过程中我们可以看到,FescarFeignClient 对原来的 Request 做了修改,它首先将 XID 从当前的事务上下文中取出,在 XID 不为空的情况下,将 XID 放到了 Header 中。

-

FeignBeanPostProcessorConfiguration 定义了 3 个 bean:FescarContextBeanPostProcessor、FescarBeanPostProcessor 和 FescarFeignObjectWrapper。其中 FescarContextBeanPostProcessor 和 FescarBeanPostProcessor 都实现了 Spring 的 BeanPostProcessor 接口。以下为 FescarContextBeanPostProcessor 的实现。

-
    @Override
-	public Object postProcessBeforeInitialization(Object bean, String beanName)
-			throws BeansException {
-		if (bean instanceof FeignContext && !(bean instanceof FescarFeignContext)) {
-			return new FescarFeignContext(getFescarFeignObjectWrapper(),
-					(FeignContext) bean);
-		}
-		return bean;
-	}
-
-	@Override
-	public Object postProcessAfterInitialization(Object bean, String beanName)
-			throws BeansException {
-		return bean;
-	}
-
-

BeanPostProcessor 中的两个方法可以对 Spring 容器中的 Bean 做前后处理,postProcessBeforeInitialization 处理时机是初始化之前,postProcessAfterInitialization 的处理时机是初始化之后,这2个方法的返回值可以是原先生成的实例 bean,或者使用 wrapper 包装后的实例。

-

FescarContextBeanPostProcessor 将 FeignContext 包装成 FescarFeignContext。
-FescarBeanPostProcessor 将 FeignClient 根据是否继承了 LoadBalancerFeignClient 包装成 FescarLoadBalancerFeignClient 和 FescarFeignClient。

-

FeignAutoConfiguration 中的 FeignContext 并没有加 ConditionalOnXXX 的条件,所以 Fescar 采用预置处理的方式将 FeignContext 包装成 FescarFeignContext。

-
    @Bean
-	public FeignContext feignContext() {
-		FeignContext context = new FeignContext();
-		context.setConfigurations(this.configurations);
-		return context;
-	}
-
-

而对于 Feign Client,FeignClientFactoryBean 中会获取 FeignContext 的实例对象。对于开发者采用 @Configuration 注解的自定义配置的 Feign Client 对象,这里会被配置到 builder,导致 FescarFeignBuilder 中增强后的 FescarFeignClient 失效。FeignClientFactoryBean 中关键代码如下:

-
	/**
-	 * @param <T> the target type of the Feign client
-	 * @return a {@link Feign} client created with the specified data and the context information
-	 */
-	<T> T getTarget() {
-		FeignContext context = applicationContext.getBean(FeignContext.class);
-		Feign.Builder builder = feign(context);
-
-		if (!StringUtils.hasText(this.url)) {
-			if (!this.name.startsWith("http")) {
-				url = "http://" + this.name;
-			}
-			else {
-				url = this.name;
-			}
-			url += cleanPath();
-			return (T) loadBalance(builder, context, new HardCodedTarget<>(this.type,
-					this.name, url));
-		}
-		if (StringUtils.hasText(this.url) && !this.url.startsWith("http")) {
-			this.url = "http://" + this.url;
-		}
-		String url = this.url + cleanPath();
-		Client client = getOptional(context, Client.class);
-		if (client != null) {
-			if (client instanceof LoadBalancerFeignClient) {
-				// not load balancing because we have a url,
-				// but ribbon is on the classpath, so unwrap
-				client = ((LoadBalancerFeignClient)client).getDelegate();
-			}
-			builder.client(client);
-		}
-		Targeter targeter = get(context, Targeter.class);
-		return (T) targeter.target(this, builder, context, new HardCodedTarget<>(
-				this.type, this.name, url));
-	}
-
-

上述代码根据是否指定了注解参数中的 URL 来选择直接调用 URL 还是走负载均衡,targeter.target 通过动态代理创建对象。大致过程为:将解析出的 Feign 方法放入 map,再将其作为参数传入生成 InvocationHandler,进而生成动态代理对象。
由于 FescarContextBeanPostProcessor 的存在,即使开发者对 FeignClient 做了自定义配置,依旧可以完成 Fescar 所需的全局事务增强。

-

对于 FescarFeignObjectWrapper,我们重点关注下Wrapper方法:

-
	Object wrap(Object bean) {
-		if (bean instanceof Client && !(bean instanceof FescarFeignClient)) {
-			if (bean instanceof LoadBalancerFeignClient) {
-				LoadBalancerFeignClient client = ((LoadBalancerFeignClient) bean);
-				return new FescarLoadBalancerFeignClient(client.getDelegate(), factory(),
-						clientFactory(), this.beanFactory);
-			}
-			return new FescarFeignClient(this.beanFactory, (Client) bean);
-		}
-		return bean;
-	}
-
-

wrap 方法中,如果 bean 是 LoadBalancerFeignClient 的实例对象,那么首先通过 client.getDelegate() 方法将 LoadBalancerFeignClient 代理的实际 Client 对象取出后包装成 FescarFeignClient,再生成 LoadBalancerFeignClient 的子类 FescarLoadBalancerFeignClient 对象。如果 bean 是 Client 的实例对象且不是 FescarFeignClient 或 LoadBalancerFeignClient,那么 bean 会直接包装生成 FescarFeignClient。

-

上面的流程设计还是比较巧妙的:首先通过 Spring Boot 的 Auto Configuration 控制配置的先后顺序,并自定义 Feign Builder 的 Bean,保证生成的 Client 均是经过增强的 FescarFeignClient;再通过 BeanPostProcessor 对 Spring 容器中的 Bean 做一遍包装,保证容器内的 Bean 均是增强后的 FescarFeignClient,避免 FeignClientFactoryBean 的 getTarget 方法把增强后的 Client 替换掉。
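如果想在自己的工程里验证这一点,可以在应用启动后打印容器中 feign.Client Bean 的实际类型,观察其是否为增强后的实现。下面是一段示意代码(类名 ClientTypeChecker 为本文虚构,注入方式视具体工程而定,仅作验证思路参考):

import feign.Client;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

@Component
public class ClientTypeChecker implements CommandLineRunner {

	// 容器中不一定存在 Client Bean(例如未引入 ribbon/feign),所以 required = false
	@Autowired(required = false)
	private Client feignClient;

	@Override
	public void run(String... args) {
		// 若包装生效,这里预期打印 FescarFeignClient 或 FescarLoadBalancerFeignClient
		System.out.println("feign client type: "
				+ (feignClient == null ? "N/A" : feignClient.getClass().getName()));
	}
}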

-
Hystrix 隔离
-

下面我们再来看下 Hystrix 部分。为什么要单独把 Hystrix 拆出来看呢?因为 Fescar 为它单独实现了一个并发策略类。目前事务上下文 RootContext 的默认实现是基于 ThreadLocal 方式的 ThreadLocalContextCore,也就是说上下文其实是和线程绑定的。Hystrix 本身有两种隔离模式:基于信号量或者基于线程池进行隔离。Hystrix 官方建议采取线程池的方式来充分隔离,这也是一般情况下所采用的模式:

-
Thread or Semaphore
-The default, and the recommended setting, is to run HystrixCommands using thread isolation (THREAD) and HystrixObservableCommands using semaphore isolation (SEMAPHORE).
-
-Commands executed in threads have an extra layer of protection against latencies beyond what network timeouts can offer.
-
-Generally the only time you should use semaphore isolation for HystrixCommands is when the call is so high volume (hundreds per second, per instance) that the overhead of separate threads is too high; this typically only applies to non-network calls.
-
-

在线程池隔离模式下,service 层的业务代码和实际发出请求的线程肯定不是同一个,那么 ThreadLocal 的方式就没办法将 XID 传递给 Hystrix 的线程,进而传递给被调用方。怎么处理这件事情呢?Hystrix 提供了让开发者自定义并发策略的机制,只需要继承 HystrixConcurrencyStrategy 并重写 wrapCallable 方法即可。

-
public class FescarHystrixConcurrencyStrategy extends HystrixConcurrencyStrategy {
-
-	private HystrixConcurrencyStrategy delegate;
-
-	public FescarHystrixConcurrencyStrategy() {
-		this.delegate = HystrixPlugins.getInstance().getConcurrencyStrategy();
-		HystrixPlugins.reset();
-		HystrixPlugins.getInstance().registerConcurrencyStrategy(this);
-	}
-
-	@Override
-	public <K> Callable<K> wrapCallable(Callable<K> c) {
-		if (c instanceof FescarContextCallable) {
-			return c;
-		}
-
-		Callable<K> wrappedCallable;
-		if (this.delegate != null) {
-			wrappedCallable = this.delegate.wrapCallable(c);
-		}
-		else {
-			wrappedCallable = c;
-		}
-		if (wrappedCallable instanceof FescarContextCallable) {
-			return wrappedCallable;
-		}
-
-		return new FescarContextCallable<>(wrappedCallable);
-	}
-
-	private static class FescarContextCallable<K> implements Callable<K> {
-
-		private final Callable<K> actual;
-		private final String xid;
-
-		FescarContextCallable(Callable<K> actual) {
-			this.actual = actual;
-			this.xid = RootContext.getXID();
-		}
-
-		@Override
-		public K call() throws Exception {
-			try {
-				RootContext.bind(xid);
-				return actual.call();
-			}
-			finally {
-				RootContext.unbind();
-			}
-		}
-
-	}
-}
-
-

Fescar 也提供了一个 FescarHystrixAutoConfiguration,在类路径中存在 HystrixCommand 时生成 FescarHystrixConcurrencyStrategy。

-
@Configuration
-@ConditionalOnClass(HystrixCommand.class)
-public class FescarHystrixAutoConfiguration {
-
-	@Bean
-	FescarHystrixConcurrencyStrategy fescarHystrixConcurrencyStrategy() {
-		return new FescarHystrixConcurrencyStrategy();
-	}
-
-}
-
-

参考文献

- -

本文作者

-

郭树抗,社区昵称 ywind,曾就职于华为终端云,现搜狐智能媒体中心Java工程师,目前主要负责搜狐号相关开发,对分布式事务、分布式系统和微服务架构有异常浓厚的兴趣。
-季敏(清铭),社区昵称 slievrly,Fescar 开源项目负责人,阿里巴巴中间件 TXC/GTS 核心研发成员,长期从事于分布式中间件核心研发工作,在分布式事务领域有着较丰富的技术积累。

-
- - - - - - - diff --git a/zh-cn/blog/how-to-support-spring-cloud.json b/zh-cn/blog/how-to-support-spring-cloud.json deleted file mode 100644 index 675dcd61..00000000 --- a/zh-cn/blog/how-to-support-spring-cloud.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "how-to-support-spring-cloud.md", - "__html": "

Fescar 与 Spring Cloud 集成源码深度剖析

\n

Fescar 简介

\n

常见的分布式事务方式有基于 2PC 的 XA (e.g. atomikos),从业务层入手的 TCC( e.g. byteTCC)、事务消息 ( e.g. RocketMQ Half Message) 等等。XA 是需要本地数据库支持的分布式事务的协议,资源锁在数据库层面导致性能较差,而支付宝作为布道师引入的 TCC 模式需要大量的业务代码保证,开发维护成本较高。

\n

分布式事务是业界比较关注的领域,这也是短短时间 Fescar 能收获6k Star的原因之一。Fescar 名字取自 Fast & Easy Commit And Rollback ,简单来说 Fescar 通过对本地 RDBMS 分支事务的协调来驱动完成全局事务,是工作在应用层的中间件。主要优点是相对于XA模式是性能较好不长时间占用连接资源,相对于 TCC 方式开发成本和业务侵入性较低。

\n

类似于 XA,Fescar 将角色分为 TC、RM、TM,事务整体过程模型如下:

\n

\"Fescar事务过程\"

\n
1. TM 向 TC 申请开启一个全局事务,全局事务创建成功并生成一个全局唯一的 XID。\n2. XID 在微服务调用链路的上下文中传播。\n3. RM 向 TC 注册分支事务,将其纳入 XID 对应全局事务的管辖。\n4. TM 向 TC 发起针对 XID 的全局提交或回滚决议。\n5. TC 调度 XID 下管辖的全部分支事务完成提交或回滚请求。\n
\n

其中在目前的实现版本中 TC 是独立部署的进程,维护全局事务的操作记录和全局锁记录,负责协调并驱动全局事务的提交或回滚。TM RM 则与应用程序工作在同一应用进程。RM 对 JDBC 数据源采用代理的方式对底层数据库做管理,利用语法解析,在执行事务时保留快照,并生成 undo log。大概的流程和模型划分就介绍到这里,下面开始对 Fescar 事务传播机制的分析。

\n

Fescar 事务传播机制

\n

Fescar 事务传播包括应用内事务嵌套调用和跨服务调用的事务传播。Fescar 事务是怎么在微服务调用链中传播的呢?Fescar 提供了事务 API 允许用户手动绑定事务的 XID 并加入到全局事务中,所以我们根据不同的服务框架机制,将 XID 在链路中传递即可实现事务的传播。

\n

RPC 请求过程分为调用方与被调用方两部分,我们需要对 XID 在请求与响应时做相应的处理。大致过程为:调用方即请求方将当前事务上下文中的 XID 取出,通过RPC协议传递给被调用方;被调用方从请求中的将 XID 取出,并绑定到自己的事务上下文中,纳入全局事务。微服务框架一般都有相应的 Filter 和 Interceptor 机制,我们来具体分析下 Spring Cloud 与Fescar 的整合过程。

\n

Fescar 与 Spring Cloud Alibaba 集成部分源码解析

\n

本部分源码全部来自于 spring-cloud-alibaba-fescar. 源码解析部分主要包括 AutoConfiguration、微服务被调用方和微服务调用方三大部分。对于微服务调用方方式具体分为 RestTemplate 和 Feign,其中对于 Feign 请求方式又进一步细分为结合 Hystrix 和 Sentinel 的使用模式。

\n

Fescar AutoConfiguration

\n

对于 AutoConfiguration 的解析此处只介绍与 Fescar 启动相关的部分,其他部分的解析将穿插于【微服务被调用方】和 【微服务调用方】章节进行介绍。

\n

Fescar 的启动需要配置 GlobalTransactionScanner,GlobalTransactionScanner 负责初始化 Fescar 的 RM client、TM client 和 自动代理标注 GlobalTransactional 注解的类。GlobalTransactionScanner bean 的启动通过 GlobalTransactionAutoConfiguration 加载并注入FescarProperties。
\nFescarProperties 包含了 Fescar 的重要属性 txServiceGroup ,此属性的可通过 application.properties 文件中的 key: spring.cloud.alibaba.fescar.txServiceGroup 读取,默认值为 ${spring.application.name}-fescar-service-group 。txServiceGroup 表示 Fescar 的逻辑事务分组名,此分组名通过配置中心(目前支持文件、Apollo)获取逻辑事务分组名对应的 TC 集群名称,进一步通过集群名称构造出 TC 集群的服务名,通过注册中心(目前支持nacos、redis、zk和eureka)和服务名找到可用的 TC 服务节点,然后 RM client、TM client 与 TC 进行 rpc 交互。

\n

微服务被调用方

\n

由于调用方的逻辑比较多一点,我们先分析被调用方的逻辑。针对于 Spring Cloud 项目,默认采用的 RPC 传输协议是 HTTP 协议,所以使用了 HandlerInterceptor 机制来对HTTP的请求做拦截。

\n

HandlerInterceptor 是 Spring 提供的接口, 它有以下三个方法可以被覆写。

\n
    /**\n\t * Intercept the execution of a handler. Called after HandlerMapping determined\n\t * an appropriate handler object, but before HandlerAdapter invokes the handler.\n\t */\n\tdefault boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)\n\t\t\tthrows Exception {\n\n\t\treturn true;\n\t}\n\n\t/**\n\t * Intercept the execution of a handler. Called after HandlerAdapter actually\n\t * invoked the handler, but before the DispatcherServlet renders the view.\n\t * Can expose additional model objects to the view via the given ModelAndView.\n\t */\n\tdefault void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler,\n\t\t\t@Nullable ModelAndView modelAndView) throws Exception {\n\t}\n\n\t/**\n\t * Callback after completion of request processing, that is, after rendering\n\t * the view. Will be called on any outcome of handler execution, thus allows\n\t * for proper resource cleanup.\n\t */\n\tdefault void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler,\n\t\t\t@Nullable Exception ex) throws Exception {\n\t}\n
\n

根据注释,我们可以很明确的看到各个方法的作用时间和常用用途。对于 Fescar 集成来讲,它根据需要重写了 preHandle、afterCompletion 方法。

\n

FescarHandlerInterceptor 的作用是将服务链路传递过来的 XID,绑定到服务节点的事务上下文中,并且在请求完成后清理相关资源。FescarHandlerInterceptorConfiguration 中配置了所有的 url 均进行拦截,对所有的请求过来均会执行该拦截器,进行 XID 的转换与事务绑定。

\n
/**\n * @author xiaojing\n *\n * Fescar HandlerInterceptor, Convert Fescar information into\n * @see com.alibaba.fescar.core.context.RootContext from http request's header in\n * {@link org.springframework.web.servlet.HandlerInterceptor#preHandle(HttpServletRequest , HttpServletResponse , Object )},\n * And clean up Fescar information after servlet method invocation in\n * {@link org.springframework.web.servlet.HandlerInterceptor#afterCompletion(HttpServletRequest, HttpServletResponse, Object, Exception)}\n */\npublic class FescarHandlerInterceptor implements HandlerInterceptor {\n\n\tprivate static final Logger log = LoggerFactory\n\t\t\t.getLogger(FescarHandlerInterceptor.class);\n\n\t@Override\n\tpublic boolean preHandle(HttpServletRequest request, HttpServletResponse response,\n\t\t\tObject handler) throws Exception {\n\n\t\tString xid = RootContext.getXID();\n\t\tString rpcXid = request.getHeader(RootContext.KEY_XID);\n\t\tif (log.isDebugEnabled()) {\n\t\t\tlog.debug(\"xid in RootContext {} xid in RpcContext {}\", xid, rpcXid);\n\t\t}\n\n\t\tif (xid == null && rpcXid != null) {\n\t\t\tRootContext.bind(rpcXid);\n\t\t\tif (log.isDebugEnabled()) {\n\t\t\t\tlog.debug(\"bind {} to RootContext\", rpcXid);\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n\n\t@Override\n\tpublic void afterCompletion(HttpServletRequest request, HttpServletResponse response,\n\t\t\tObject handler, Exception e) throws Exception {\n\n\t\tString rpcXid = request.getHeader(RootContext.KEY_XID);\n\n\t\tif (StringUtils.isEmpty(rpcXid)) {\n\t\t\treturn;\n\t\t}\n\n\t\tString unbindXid = RootContext.unbind();\n\t\tif (log.isDebugEnabled()) {\n\t\t\tlog.debug(\"unbind {} from RootContext\", unbindXid);\n\t\t}\n\t\tif (!rpcXid.equalsIgnoreCase(unbindXid)) {\n\t\t\tlog.warn(\"xid in change during RPC from {} to {}\", rpcXid, unbindXid);\n\t\t\tif (unbindXid != null) {\n\t\t\t\tRootContext.bind(unbindXid);\n\t\t\t\tlog.warn(\"bind {} back to RootContext\", unbindXid);\n\t\t\t}\n\t\t}\n\t}\n\n}\n\n
\n

preHandle 在请求执行前被调用,xid 为当前事务上下文已经绑定的全局事务的唯一标识,rpcXid 为请求通过 HTTP Header 传递过来需要绑定的全局事务标识。preHandle 方法中判断如果当前事务上下文中没有 XID,且 rpcXid 不为空,那么就将 rpcXid 绑定到当前的事务上下文。

\n

afterCompletion 在请求完成后被调用,该方法用来执行资源的相关清理动作。Fescar 通过 RootContext.unbind() 方法对事务上下文涉及到的 XID 进行解绑。下面 if 中的逻辑是为了代码的健壮性考虑,如果遇到 rpcXid和 unbindXid 不相等的情况,再将 unbindXid 重新绑定回去。

\n

对于 Spring Cloud 来讲,默认采用的 RPC 方式是 HTTP 的方式,所以对被调用方来讲,它的请求拦截方式不用做任何区分,只需要从 Header 中将 XID 就可以取出绑定到自己的事务上下文中即可。但是对于调用方由于请求组件的多样化,包括熔断隔离机制,所以要区分不同的情况做处理,后面我们来具体分析一下。

\n

微服务调用方

\n

Fescar 将请求方式分为:RestTemplate、Feign、Feign+Hystrix 和 Feign+Sentinel 。不同的组件通过 Spring Boot 的 Auto Configuration 来完成自动的配置,具体的配置类清单可以看 spring.factories ,下文也会介绍相关的配置类。

\n
RestTemplate
\n

先来看下如果调用方如果是是基于 RestTemplate 的请求,Fescar 是怎么传递 XID 的。

\n
public class FescarRestTemplateInterceptor implements ClientHttpRequestInterceptor {\n\t@Override\n\tpublic ClientHttpResponse intercept(HttpRequest httpRequest, byte[] bytes,\n\t\t\tClientHttpRequestExecution clientHttpRequestExecution) throws IOException {\n\t\tHttpRequestWrapper requestWrapper = new HttpRequestWrapper(httpRequest);\n\n\t\tString xid = RootContext.getXID();\n\n\t\tif (!StringUtils.isEmpty(xid)) {\n\t\t\trequestWrapper.getHeaders().add(RootContext.KEY_XID, xid);\n\t\t}\n\t\treturn clientHttpRequestExecution.execute(requestWrapper, bytes);\n\t}\n}\n
\n

FescarRestTemplateInterceptor 实现了 ClientHttpRequestInterceptor 接口的 intercept 方法,对调用的请求做了包装,在发送请求时若存在 Fescar 事务上下文 XID 则取出并放到 HTTP Header 中。

\n

FescarRestTemplateInterceptor 通过 FescarRestTemplateAutoConfiguration 实现将 FescarRestTemplateInterceptor 配置到 RestTemplate 中去。

\n
@Configuration\npublic class FescarRestTemplateAutoConfiguration {\n\n\t@Bean\n\tpublic FescarRestTemplateInterceptor fescarRestTemplateInterceptor() {\n\t\treturn new FescarRestTemplateInterceptor();\n\t}\n\n\t@Autowired(required = false)\n\tprivate Collection<RestTemplate> restTemplates;\n\n\t@Autowired\n\tprivate FescarRestTemplateInterceptor fescarRestTemplateInterceptor;\n\n\t@PostConstruct\n\tpublic void init() {\n\t\tif (this.restTemplates != null) {\n\t\t\tfor (RestTemplate restTemplate : restTemplates) {\n\t\t\t\tList<ClientHttpRequestInterceptor> interceptors = new ArrayList<ClientHttpRequestInterceptor>(\n\t\t\t\t\t\trestTemplate.getInterceptors());\n\t\t\t\tinterceptors.add(this.fescarRestTemplateInterceptor);\n\t\t\t\trestTemplate.setInterceptors(interceptors);\n\t\t\t}\n\t\t}\n\t}\n\n}\n
\n

init 方法遍历所有的 restTemplate ,并将原来 restTemplate 中的拦截器取出,增加 fescarRestTemplateInterceptor 后置入并重排序。

\n
Feign
\n

\"Feign

\n

接下来看下 Feign 的相关代码,该包下面的类还是比较多的,我们先从其 AutoConfiguration 入手。

\n
@Configuration\n@ConditionalOnClass(Client.class)\n@AutoConfigureBefore(FeignAutoConfiguration.class)\npublic class FescarFeignClientAutoConfiguration {\n\n\t@Bean\n\t@Scope(\"prototype\")\n\t@ConditionalOnClass(name = \"com.netflix.hystrix.HystrixCommand\")\n\t@ConditionalOnProperty(name = \"feign.hystrix.enabled\", havingValue = \"true\")\n\tFeign.Builder feignHystrixBuilder(BeanFactory beanFactory) {\n\t\treturn FescarHystrixFeignBuilder.builder(beanFactory);\n\t}\n\n\t@Bean\n\t@Scope(\"prototype\")\n\t@ConditionalOnClass(name = \"com.alibaba.csp.sentinel.SphU\")\n\t@ConditionalOnProperty(name = \"feign.sentinel.enabled\", havingValue = \"true\")\n\tFeign.Builder feignSentinelBuilder(BeanFactory beanFactory) {\n\t\treturn FescarSentinelFeignBuilder.builder(beanFactory);\n\t}\n\n\t@Bean\n\t@ConditionalOnMissingBean\n\t@Scope(\"prototype\")\n\tFeign.Builder feignBuilder(BeanFactory beanFactory) {\n\t\treturn FescarFeignBuilder.builder(beanFactory);\n\t}\n\n\t@Configuration\n\tprotected static class FeignBeanPostProcessorConfiguration {\n\n\t\t@Bean\n\t\tFescarBeanPostProcessor fescarBeanPostProcessor(\n\t\t\t\tFescarFeignObjectWrapper fescarFeignObjectWrapper) {\n\t\t\treturn new FescarBeanPostProcessor(fescarFeignObjectWrapper);\n\t\t}\n\n\t\t@Bean\n\t\tFescarContextBeanPostProcessor fescarContextBeanPostProcessor(\n\t\t\t\tBeanFactory beanFactory) {\n\t\t\treturn new FescarContextBeanPostProcessor(beanFactory);\n\t\t}\n\n\t\t@Bean\n\t\tFescarFeignObjectWrapper fescarFeignObjectWrapper(BeanFactory beanFactory) {\n\t\t\treturn new FescarFeignObjectWrapper(beanFactory);\n\t\t}\n\t}\n\n}\n
\n

FescarFeignClientAutoConfiguration 在存在 Client.class 时生效,且要求作用在 FeignAutoConfiguration 之前。由于FeignClientsConfiguration 是在 FeignAutoConfiguration 生成 FeignContext 生效的,所以根据依赖关系, FescarFeignClientAutoConfiguration 同样早于 FeignClientsConfiguration。

\n

FescarFeignClientAutoConfiguration 自定义了 Feign.Builder,针对于 feign.sentinel,feign.hystrix 和 feign 的情况做了适配,目的是自定义 feign 中 Client 的真正实现为 FescarFeignClient。

\n
HystrixFeign.builder().retryer(Retryer.NEVER_RETRY)\n      .client(new FescarFeignClient(beanFactory))\n
\n
SentinelFeign.builder().retryer(Retryer.NEVER_RETRY)\n\t\t\t\t.client(new FescarFeignClient(beanFactory));\n
\n
Feign.builder().client(new FescarFeignClient(beanFactory));\n
\n

FescarFeignClient 是对原来的 Feign 客户端代理增强,具体代码见下图:

\n
public class FescarFeignClient implements Client {\n\n\tprivate final Client delegate;\n\tprivate final BeanFactory beanFactory;\n\n\tFescarFeignClient(BeanFactory beanFactory) {\n\t\tthis.beanFactory = beanFactory;\n\t\tthis.delegate = new Client.Default(null, null);\n\t}\n\n\tFescarFeignClient(BeanFactory beanFactory, Client delegate) {\n\t\tthis.delegate = delegate;\n\t\tthis.beanFactory = beanFactory;\n\t}\n\n\t@Override\n\tpublic Response execute(Request request, Request.Options options) throws IOException {\n\n\t\tRequest modifiedRequest = getModifyRequest(request);\n\n\t\ttry {\n\t\t\treturn this.delegate.execute(modifiedRequest, options);\n\t\t}\n\t\tfinally {\n\n\t\t}\n\t}\n\n\tprivate Request getModifyRequest(Request request) {\n\n\t\tString xid = RootContext.getXID();\n\n\t\tif (StringUtils.isEmpty(xid)) {\n\t\t\treturn request;\n\t\t}\n\n\t\tMap<String, Collection<String>> headers = new HashMap<>();\n\t\theaders.putAll(request.headers());\n\n\t\tList<String> fescarXid = new ArrayList<>();\n\t\tfescarXid.add(xid);\n\t\theaders.put(RootContext.KEY_XID, fescarXid);\n\n\t\treturn Request.create(request.method(), request.url(), headers, request.body(),\n\t\t\t\trequest.charset());\n\t}\n\n
\n

上面的过程中我们可以看到,FescarFeignClient 对原来的 Request 做了修改,它首先将 XID 从当前的事务上下文中取出,在 XID 不为空的情况下,将 XID 放到了 Header 中。

\n

FeignBeanPostProcessorConfiguration 定义了3个 bean:FescarContextBeanPostProcessor、FescarBeanPostProcessor 和 FescarFeignObjectWrapper。其中 FescarContextBeanPostProcessor FescarBeanPostProcessor 实现了Spring BeanPostProcessor 接口。\n以下为 FescarContextBeanPostProcessor 实现。

\n
    @Override\n\tpublic Object postProcessBeforeInitialization(Object bean, String beanName)\n\t\t\tthrows BeansException {\n\t\tif (bean instanceof FeignContext && !(bean instanceof FescarFeignContext)) {\n\t\t\treturn new FescarFeignContext(getFescarFeignObjectWrapper(),\n\t\t\t\t\t(FeignContext) bean);\n\t\t}\n\t\treturn bean;\n\t}\n\n\t@Override\n\tpublic Object postProcessAfterInitialization(Object bean, String beanName)\n\t\t\tthrows BeansException {\n\t\treturn bean;\n\t}\n
\n

BeanPostProcessor 中的两个方法可以对 Spring 容器中的 Bean 做前后处理,postProcessBeforeInitialization 处理时机是初始化之前,postProcessAfterInitialization 的处理时机是初始化之后,这2个方法的返回值可以是原先生成的实例 bean,或者使用 wrapper 包装后的实例。

\n

FescarContextBeanPostProcessor 将 FeignContext 包装成 FescarFeignContext。
\nFescarBeanPostProcessor 将 FeignClient 根据是否继承了 LoadBalancerFeignClient 包装成 FescarLoadBalancerFeignClient 和 FescarFeignClient。

\n

FeignAutoConfiguration 中的 FeignContext 并没有加 ConditionalOnXXX 的条件,所以 Fescar 采用预置处理的方式将 FeignContext 包装成 FescarFeignContext。

\n
    @Bean\n\tpublic FeignContext feignContext() {\n\t\tFeignContext context = new FeignContext();\n\t\tcontext.setConfigurations(this.configurations);\n\t\treturn context;\n\t}\n
\n

而对于 Feign Client,FeignClientFactoryBean 中会获取 FeignContext 的实例对象。对于开发者采用 @Configuration 注解的自定义配置的 Feign Client 对象,这里会被配置到 builder,导致 FescarFeignBuilder 中增强后的 FescarFeignClient 失效。FeignClientFactoryBean 中关键代码如下:

\n
\t/**\n\t * @param <T> the target type of the Feign client\n\t * @return a {@link Feign} client created with the specified data and the context information\n\t */\n\t<T> T getTarget() {\n\t\tFeignContext context = applicationContext.getBean(FeignContext.class);\n\t\tFeign.Builder builder = feign(context);\n\n\t\tif (!StringUtils.hasText(this.url)) {\n\t\t\tif (!this.name.startsWith(\"http\")) {\n\t\t\t\turl = \"http://\" + this.name;\n\t\t\t}\n\t\t\telse {\n\t\t\t\turl = this.name;\n\t\t\t}\n\t\t\turl += cleanPath();\n\t\t\treturn (T) loadBalance(builder, context, new HardCodedTarget<>(this.type,\n\t\t\t\t\tthis.name, url));\n\t\t}\n\t\tif (StringUtils.hasText(this.url) && !this.url.startsWith(\"http\")) {\n\t\t\tthis.url = \"http://\" + this.url;\n\t\t}\n\t\tString url = this.url + cleanPath();\n\t\tClient client = getOptional(context, Client.class);\n\t\tif (client != null) {\n\t\t\tif (client instanceof LoadBalancerFeignClient) {\n\t\t\t\t// not load balancing because we have a url,\n\t\t\t\t// but ribbon is on the classpath, so unwrap\n\t\t\t\tclient = ((LoadBalancerFeignClient)client).getDelegate();\n\t\t\t}\n\t\t\tbuilder.client(client);\n\t\t}\n\t\tTargeter targeter = get(context, Targeter.class);\n\t\treturn (T) targeter.target(this, builder, context, new HardCodedTarget<>(\n\t\t\t\tthis.type, this.name, url));\n\t}\n
\n

上述代码根据是否指定了注解参数中的 URL 来选择直接调用 URL 还是走负载均衡,targeter.target 通过动态代理创建对象。大致过程为:将解析出的feign方法放入map\n,再通过将其作为参数传入生成InvocationHandler,进而生成动态代理对象。
\nFescarContextBeanPostProcessor 的存在,即使开发者对 FeignClient 自定义操作,依旧可以完成 Fescar 所需的全局事务的增强。

\n

对于 FescarFeignObjectWrapper,我们重点关注下Wrapper方法:

\n
\tObject wrap(Object bean) {\n\t\tif (bean instanceof Client && !(bean instanceof FescarFeignClient)) {\n\t\t\tif (bean instanceof LoadBalancerFeignClient) {\n\t\t\t\tLoadBalancerFeignClient client = ((LoadBalancerFeignClient) bean);\n\t\t\t\treturn new FescarLoadBalancerFeignClient(client.getDelegate(), factory(),\n\t\t\t\t\t\tclientFactory(), this.beanFactory);\n\t\t\t}\n\t\t\treturn new FescarFeignClient(this.beanFactory, (Client) bean);\n\t\t}\n\t\treturn bean;\n\t}\n
\n

wrap 方法中,如果 bean 是 LoadBalancerFeignClient 的实例对象,那么首先通过 client.getDelegate() 方法将 LoadBalancerFeignClient 代理的实际 Client 对象取出后包装成 FescarFeignClient,再生成 LoadBalancerFeignClient 的子类 FescarLoadBalancerFeignClient 对象。如果 bean 是 Client 的实例对象且不是 FescarFeignClient LoadBalancerFeignClient,那么 bean 会直接包装生成 FescarFeignClient。

\n

上面的流程设计还是比较巧妙的,首先根据 Spring boot 的 Auto Configuration 控制了配置的先后顺序,同时自定义了 Feign Builder 的Bean,保证了 Client 均是经过增强后的 FescarFeignClient 。再通过 BeanPostProcessor 对Spring 容器中的 Bean 做了一遍包装,保证容器内的Bean均是增强后 FescarFeignClient ,避免 FeignClientFactoryBean getTarget 方法的替换动作。

\n
Hystrix 隔离
\n

下面我们再来看下 Hystrix 部分,为什么要单独把 Hystrix 拆出来看呢,而且 Fescar 代码也单独实现了个策略类。目前事务上下文 RootContext 的默认实现是基于 ThreadLocal 方式的 ThreadLocalContextCore,也就是上下文其实是和线程绑定的。Hystrix 本身有两种隔离状态的模式,基于信号量或者基于线程池进行隔离。Hystrix 官方建议是采取线程池的方式来充分隔离,也是一般情况下在采用的模式:

\n
Thread or Semaphore\nThe default, and the recommended setting, is to run HystrixCommands using thread isolation (THREAD) and HystrixObservableCommands using semaphore isolation (SEMAPHORE).\n\nCommands executed in threads have an extra layer of protection against latencies beyond what network timeouts can offer.\n\nGenerally the only time you should use semaphore isolation for HystrixCommands is when the call is so high volume (hundreds per second, per instance) that the overhead of separate threads is too high; this typically only applies to non-network calls.\n
\n

service 层的业务代码和请求发出的线程肯定不是同一个,那么 ThreadLocal 的方式就没办法将 XID 传递给 Hystrix 的线程并传递给被调用方的。怎么处理这件事情呢,Hystrix 提供了机制让开发者去自定义并发策略,只需要继承 HystrixConcurrencyStrategy 重写 wrapCallable 方法即可。

\n
public class FescarHystrixConcurrencyStrategy extends HystrixConcurrencyStrategy {\n\n\tprivate HystrixConcurrencyStrategy delegate;\n\n\tpublic FescarHystrixConcurrencyStrategy() {\n\t\tthis.delegate = HystrixPlugins.getInstance().getConcurrencyStrategy();\n\t\tHystrixPlugins.reset();\n\t\tHystrixPlugins.getInstance().registerConcurrencyStrategy(this);\n\t}\n\n\t@Override\n\tpublic <K> Callable<K> wrapCallable(Callable<K> c) {\n\t\tif (c instanceof FescarContextCallable) {\n\t\t\treturn c;\n\t\t}\n\n\t\tCallable<K> wrappedCallable;\n\t\tif (this.delegate != null) {\n\t\t\twrappedCallable = this.delegate.wrapCallable(c);\n\t\t}\n\t\telse {\n\t\t\twrappedCallable = c;\n\t\t}\n\t\tif (wrappedCallable instanceof FescarContextCallable) {\n\t\t\treturn wrappedCallable;\n\t\t}\n\n\t\treturn new FescarContextCallable<>(wrappedCallable);\n\t}\n\n\tprivate static class FescarContextCallable<K> implements Callable<K> {\n\n\t\tprivate final Callable<K> actual;\n\t\tprivate final String xid;\n\n\t\tFescarContextCallable(Callable<K> actual) {\n\t\t\tthis.actual = actual;\n\t\t\tthis.xid = RootContext.getXID();\n\t\t}\n\n\t\t@Override\n\t\tpublic K call() throws Exception {\n\t\t\ttry {\n\t\t\t\tRootContext.bind(xid);\n\t\t\t\treturn actual.call();\n\t\t\t}\n\t\t\tfinally {\n\t\t\t\tRootContext.unbind();\n\t\t\t}\n\t\t}\n\n\t}\n}\n
\n

Fescar 也提供一个 FescarHystrixAutoConfiguration,在存在 HystrixCommand 的时候生成FescarHystrixConcurrencyStrategy

\n
@Configuration\n@ConditionalOnClass(HystrixCommand.class)\npublic class FescarHystrixAutoConfiguration {\n\n\t@Bean\n\tFescarHystrixConcurrencyStrategy fescarHystrixConcurrencyStrategy() {\n\t\treturn new FescarHystrixConcurrencyStrategy();\n\t}\n\n}\n
\n

参考文献

\n\n

本文作者

\n

郭树抗,社区昵称 ywind,曾就职于华为终端云,现搜狐智能媒体中心Java工程师,目前主要负责搜狐号相关开发,对分布式事务、分布式系统和微服务架构有异常浓厚的兴趣。
\n季敏(清铭),社区昵称 slievrly,Fescar 开源项目负责人,阿里巴巴中间件 TXC/GTS 核心研发成员,长期从事于分布式中间件核心研发工作,在分布式事务领域有着较丰富的技术积累。

\n", - "link": "/zh-cn/blog/how-to-support-spring-cloud.html", - "meta": { - "title": "Fescar 与 Spring Cloud 集成源码深度剖析", - "author": "郭树抗 季敏", - "date": "2019/04/15", - "keywords": "fescar、seata、分布式事务" - } -} \ No newline at end of file diff --git a/zh-cn/blog/index.html b/zh-cn/blog/index.html deleted file mode 100644 index 6cf4fabc..00000000 --- a/zh-cn/blog/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - 博客 - - - - -
博客
- - - - - - - diff --git a/zh-cn/blog/integrate-seata-with-spring-cloud.html b/zh-cn/blog/integrate-seata-with-spring-cloud.html deleted file mode 100644 index 669f78ad..00000000 --- a/zh-cn/blog/integrate-seata-with-spring-cloud.html +++ /dev/null @@ -1,240 +0,0 @@ - - - - - - - - - - Seata(Fescar)分布式事务 整合 Spring Cloud - - - - -

1.前言

-

相信很多开发者对 Fescar 已经并不陌生,当然 Fescar 已经成为了过去时。为什么说它是过去时?因为 Fescar 已经华丽地变身为 Seata。如果还不了解 Seata 的朋友,请访问下面网址查看。

-

SEATA GITHUB:[https://github.com/seata/seata]

-

对于阿里各位同学的前仆后继,给我们广大开发者带来很多开源软件,在这里对他们表示真挚的感谢与问候。

-

今天在这里和大家分享下Spring Cloud 整合Seata 的相关心得。也让更多的朋友在搭建的道路上少走一些弯路,少踩一些坑。

-

2.工程内容

-

本次搭建流程为:client->网关->服务消费者->服务提供者.

-
                        技术框架:spring cloud gateway
-
-                                spring cloud feign
-
-                                nacos1.0.RC2
-
-                                fescar-server0.4.1(Seata)
-
-

关于nacos的启动方式请参考:Nacos启动参考

-

首先seata支持很多种注册服务方式,在 fescar-server-0.4.1\conf 目录下

-
    file.conf
-    logback.xml
-    nacos-config.sh
-    nacos-config.text
-    registry.conf
-
-

总共包含五个文件,其中 file.conf 和 registry.conf 分别是我们在 服务消费者 & 服务提供者 代码段需要用到的文件。 -注:file.conf 和 registry.conf 必须在当前使用的应用程序中,即: 服务消费者 & 服务提供者 两个应用中都需要包含。 -如果你采用的配置中心是 nacos 、zk,file.conf 是可以忽略的;但如果 type="file",就必须使用 file.conf。

-

下面是registry.conf 文件中的配置信息,其中 registry 是注册服务中心配置。config为配置中心的配置地方。

-

从下面可以知道,目前 seata 支持 nacos、file、eureka、redis、zookeeper 等注册配置方式,默认下载的是 type=“file” 的文件方式,当然这里选用什么方式,取决于

-

每个项目的实际情况。这里我选用的是 nacos,eureka 也是可以的,我这边分别对这两种方式进行了整合测试,均可以通过。

-

注:如果整合eureka请选用官方最新版本。

-

3.核心配置

-
registry {
-  # file 、nacos 、eureka、redis、zk
-  type = "nacos"
-
-  nacos {
-    serverAddr = "localhost"
-    namespace = "public"
-    cluster = "default"
-  }
-  eureka {
-    serviceUrl = "http://localhost:1001/eureka"
-    application = "default"
-    weight = "1"
-  }
-  redis {
-    serverAddr = "localhost:6379"
-    db = "0"
-  }
-  zk {
-    cluster = "default"
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-config {
-  # file、nacos 、apollo、zk
-  type = "nacos"
-
-  nacos {
-    serverAddr = "localhost"
-    namespace = "public"
-    cluster = "default"
-  }
-  apollo {
-    app.id = "fescar-server"
-    apollo.meta = "http://192.168.1.204:8801"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-

这里要说明的是,nacos-config.sh 是在采用 nacos 配置中心时需要执行的脚本,用于向 nacos 做默认的初始化配置。

-

SEATA 的启动方式参考官方。注意,这里需要说明下:官方的启动命令是通过空格区分参数的,所以要注意格式。这里的 IP 是可选参数;因为涉及到 DNS 解析,部分情况下 fescar 注册到 nacos 时获取到的地址会是计算机名称,如果启动后发现注册的是计算机名称,就需要指定 IP,或者在 host 中配置 IP 指向。不过这个问题在最新的 SEATA 中已经进行了修复。

-
sh fescar-server.sh 8091 /home/admin/fescar/data/ IP(可选)
-
-

上面提到过,在我们的代码中也是需要 file.conf 和 registry.conf 的。这里着重要说的是 file.conf:file.conf 只有当 registry 中配置为 file 时才会加载,如果采用 ZK、nacos 作为配置中心,则可以忽略,因为 type 指定为其他值时是不会加载 file.conf 的;但是对应的 service.localRgroup.grouplist 和 service.vgroup_mapping 需要在所用的配置中心进行指定,这样你的 client 在启动后会自动从配置中心获取对应的 SEATA 服务和地址,如果不配置会出现无法连接 server 的错误。当然,如果你采用的是 eureka,在 config 的地方就需要采用 type="file",目前 SEATA 的 config 暂时不支持 eureka 的形式。

-
transport {
-  # tcp udt unix-domain-socket
-  type = "TCP"
-  #NIO NATIVE
-  server = "NIO"
-  #enable heartbeat
-  heartbeat = true
-  #thread factory for netty
-  thread-factory {
-    boss-thread-prefix = "NettyBoss"
-    worker-thread-prefix = "NettyServerNIOWorker"
-    server-executor-thread-prefix = "NettyServerBizHandler"
-    share-boss-worker = false
-    client-selector-thread-prefix = "NettyClientSelector"
-    client-selector-thread-size = 1
-    client-worker-thread-prefix = "NettyClientWorkerThread"
-    # netty boss thread size,will not be used for UDT
-    boss-thread-size = 1
-    #auto default pin or 8
-    worker-thread-size = 8
-  }
-}
-service {
-  #vgroup->rgroup
-  vgroup_mapping.service-provider-fescar-service-group = "default"
-  #only support single node
-  localRgroup.grouplist = "127.0.0.1:8091"
-  #degrade current not support
-  enableDegrade = false
-  #disable
-  disable = false
-}
-
-client {
-  async.commit.buffer.limit = 10000
-  lock {
-    retry.internal = 10
-    retry.times = 30
-  }
-}
-
-

4.服务相关

-

这里有两个地方需要注意

-
    grouplist IP,这里是当前fescar-server的IP端口,
-    vgroup_mapping的配置。
-
-

vgroup_mapping.服务名称-fescar-service-group,这里要说下,服务名称其实是你当前 consumer 或者 provider 在 application.properties 中配置的应用名称:spring.application.name=service-provider。源代码中是获取应用名称并与 fescar-service-group 进行拼接,作为 key 值;同理,value 是当前 fescar 的服务名称,即 cluster = "default" / application = "default"。

-
     vgroup_mapping.service-provider-fescar-service-group = "default"
-      #only support single node
-      localRgroup.grouplist = "127.0.0.1:8091"
-
-

同理无论是provider 还是consumer 都需要这两个文件进行配置。

-

如果你采用 nacos 做配置中心,则需要在 nacos 中以添加配置的方式把上述配置项加进去,示意见下。

-
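需要添加的配置项内容大致如下(键名沿用上文 file.conf 中 service 部分的写法;具体的 dataId、group 等取值以所用版本的 nacos-config.sh / nacos-config.txt 为准,此处仅作示意):

service.vgroup_mapping.service-provider-fescar-service-group = "default"
service.localRgroup.grouplist = "127.0.0.1:8091"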

5.事务使用

-

我这里的代码逻辑是:请求通过网关进行负载转发到我的 consumer 上,在 consumer 中通过 feign 进行 provider 请求。官方的例子中是通过 feign 进行的,而我们这边直接通过网关转发,所以全局事务同官方的 demo 一样,也都是在 controller 层。

-
@RestController
-public class DemoController {
-	@Autowired
-	private DemoFeignClient demoFeignClient;
-	
-	@Autowired
-	private DemoFeignClient2 demoFeignClient2;
-	@GlobalTransactional(timeoutMills = 300000, name = "spring-cloud-demo-tx")
-	@GetMapping("/getdemo")
-	public String demo() {
-		
-		// 调用A 服务  简单save
-		ResponseData<Integer> result = demoFeignClient.insertService("test",1);
-		if(result.getStatus()==400) {
-			System.out.println(result+"+++++++++++++++++++++++++++++++++++++++");
-			throw new RuntimeException("this is error1");
-		}
-	
-		// 调用B 服务。报错测试A 服务回滚
-		ResponseData<Integer>  result2 = demoFeignClient2.saveService();
-	
-		if(result2.getStatus()==400) {
-			System.out.println(result2+"+++++++++++++++++++++++++++++++++++++++");
-			throw new RuntimeException("this is error2");
-		}
-
-		return "SUCCESS";
-	}
-}
-
-

核心的事务整合到此基本结束了。我这里是针对 A、B 两个 provider 进行调用,当 B 发生报错后,进行全局事务回滚。当然,每个事务内部都可以通过自己独立的本地事务去处理自己的本地逻辑。

-

SEATA 是通过全局的 XID 来统一标识一个分布式事务的。这里就不列出 SEATA 需要用到的数据库表,具体参考:spring-cloud-fescar 官方DEMO

-

5.数据代理

-

这里还有一个重要的说明:在分库服务的情况下,每一个业务数据库内都需要有一张 undo_log 表,按 XID 统一存储回滚日志。

-

同时,针对每个提供服务的项目,需要进行数据库连接池的代理。也就是:

-

目前只支持Druid连接池,后续会继续支持。

-
@Configuration
-public class DatabaseConfiguration {
-
-	
-	@Bean(destroyMethod = "close", initMethod = "init")
-	@ConfigurationProperties(prefix="spring.datasource")
-	public DruidDataSource druidDataSource() {
-
-		return new DruidDataSource();
-	}
-	
-	
-	@Bean
-	public DataSourceProxy dataSourceProxy(DruidDataSource druidDataSource) {
-	
-		return new DataSourceProxy(druidDataSource);
-	}
-	
-
-    @Bean
-    public SqlSessionFactory sqlSessionFactory(DataSourceProxy dataSourceProxy) throws Exception {
-        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
-        factoryBean.setDataSource(dataSourceProxy);    
-        return factoryBean.getObject();
-    }
-}
-
-

大家要注意的就是配置文件和数据代理。如果没有进行数据源代理,undo_log 表是没有数据的,也就没办法进行 XID 的管理。

-

本文作者:大菲.Fei

-
- - - - - - - diff --git a/zh-cn/blog/integrate-seata-with-spring-cloud.json b/zh-cn/blog/integrate-seata-with-spring-cloud.json deleted file mode 100644 index 9424af8e..00000000 --- a/zh-cn/blog/integrate-seata-with-spring-cloud.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "integrate-seata-with-spring-cloud.md", - "__html": "

1.前言

\n

针对Fescar 相信很多开发者已经对他并不陌生,当然Fescar 已经成为了过去时,为什么说它是过去时,因为Fescar 已经华丽的变身为Seata。如果还不知道Seata 的朋友,请登录下面网址查看。

\n

SEATA GITHUB:[https://github.com/seata/seata]

\n

对于阿里各位同学的前仆后继,给我们广大开发者带来很多开源软件,在这里对他们表示真挚的感谢与问候。

\n

今天在这里和大家分享下Spring Cloud 整合Seata 的相关心得。也让更多的朋友在搭建的道路上少走一些弯路,少踩一些坑。

\n

2.工程内容

\n

本次搭建流程为:client->网关->服务消费者->服务提供者.

\n
                        技术框架:spring cloud gateway\n\n                                spring cloud fegin\n\n                                nacos1.0.RC2\n\n                                fescar-server0.4.1(Seata)\n
\n

关于nacos的启动方式请参考:Nacos启动参考

\n

首先seata支持很多种注册服务方式,在 fescar-server-0.4.1\\conf 目录下

\n
    file.conf\n    logback.xml\n    nacos-config.sh\n    nacos-config.text\n    registry.conf\n
\n

总共包含五个文件,其中 file.conf和 registry.conf 分别是我们在 服务消费者 & 服务提供者 代码段需要用到的文件。\n注:file.conf和 registry.conf 必须在当前使用的应用程序中,即: 服务消费者 & 服务提供者 两个应用在都需要包含。\n如果你采用了配置中心 是nacos 、zk ,file.cnf 是可以忽略的。但是type=“file” 如果是为file 就必须得用file.cnf

\n

下面是registry.conf 文件中的配置信息,其中 registry 是注册服务中心配置。config为配置中心的配置地方。

\n

从下面可知道目前seata支持nacos,file eureka redis zookeeper 等注册配置方式,默认下载的type=“file” 文件方式,当然这里选用什么方式,取决于

\n

每个人项目的实际情况,这里我选用的是nacos,eureka的也是可以的,我这边分别对这两个版本进行整合测试均可以通过。

\n

注:如果整合eureka请选用官方最新版本。

\n

3.核心配置

\n
registry {\n  # file 、nacos 、eureka、redis、zk\n  type = \"nacos\"\n\n  nacos {\n    serverAddr = \"localhost\"\n    namespace = \"public\"\n    cluster = \"default\"\n  }\n  eureka {\n    serviceUrl = \"http://localhost:1001/eureka\"\n    application = \"default\"\n    weight = \"1\"\n  }\n  redis {\n    serverAddr = \"localhost:6379\"\n    db = \"0\"\n  }\n  zk {\n    cluster = \"default\"\n    serverAddr = \"127.0.0.1:2181\"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  file {\n    name = \"file.conf\"\n  }\n}\n\nconfig {\n  # file、nacos 、apollo、zk\n  type = \"nacos\"\n\n  nacos {\n    serverAddr = \"localhost\"\n    namespace = \"public\"\n    cluster = \"default\"\n  }\n  apollo {\n    app.id = \"fescar-server\"\n    apollo.meta = \"http://192.168.1.204:8801\"\n  }\n  zk {\n    serverAddr = \"127.0.0.1:2181\"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  file {\n    name = \"file.conf\"\n  }\n}\n
\n

这里要说明的是nacos-config.sh 是针对采用nacos配置中心的话,需要执行的一些默认初始化针对nacos的脚本。

\n

SEATA的启动方式参考官方: 注意,这里需要说明下,命令启动官方是通过 空格区分参数,所以要注意。这里的IP 是可选参数,因为涉及到DNS解析,在部分情况下,有的时候在注册中心fescar 注入nacos的时候会通过获取地址,如果启动报错注册发现是计算机名称,需要指定IP。或者host配置IP指向。不过这个问题,在最新的SEATA中已经进行了修复。

\n
sh fescar-server.sh 8091 /home/admin/fescar/data/ IP(可选)\n
\n

上面提到过,在我们的代码中也是需要file.conf 和registry.conf 这里着重的地方要说的是file.conf,file.conf只有当registry中 配置file的时候才会进行加载,如果采用ZK、nacos、作为配置中心,可以忽略。因为type指定其他是不加载file.conf的,但是对应的 service.localRgroup.grouplist 和 service.vgroup_mapping 需要在支持配置中心 进行指定,这样你的client 在启动后会通过自动从配置中心获取对应的 SEATA 服务 和地址。如果不配置会出现无法连接server的错误。当然如果你采用的eureka在config的地方就需要采用type="file" 目前SEATA config暂时不支持eureka的形势

\n
transport {\n  # tcp udt unix-domain-socket\n  type = \"TCP\"\n  #NIO NATIVE\n  server = \"NIO\"\n  #enable heartbeat\n  heartbeat = true\n  #thread factory for netty\n  thread-factory {\n    boss-thread-prefix = \"NettyBoss\"\n    worker-thread-prefix = \"NettyServerNIOWorker\"\n    server-executor-thread-prefix = \"NettyServerBizHandler\"\n    share-boss-worker = false\n    client-selector-thread-prefix = \"NettyClientSelector\"\n    client-selector-thread-size = 1\n    client-worker-thread-prefix = \"NettyClientWorkerThread\"\n    # netty boss thread size,will not be used for UDT\n    boss-thread-size = 1\n    #auto default pin or 8\n    worker-thread-size = 8\n  }\n}\nservice {\n  #vgroup->rgroup\n  vgroup_mapping.service-provider-fescar-service-group = \"default\"\n  #only support single node\n  localRgroup.grouplist = \"127.0.0.1:8091\"\n  #degrade current not support\n  enableDegrade = false\n  #disable\n  disable = false\n}\n\nclient {\n  async.commit.buffer.limit = 10000\n  lock {\n    retry.internal = 10\n    retry.times = 30\n  }\n}\n
\n

4.服务相关

\n

这里有两个地方需要注意

\n
    grouplist IP,这里是当前fescar-sever的IP端口,\n    vgroup_mapping的配置。\n
\n

vgroup_mapping.服务名称-fescar-service-group,这里 要说下服务名称其实是你当前的consumer 或者provider application.properties的配置的应用名称:spring.application.name=service-provider,源代码中是 获取应用名称与 fescar-service-group 进行拼接,做key值。同理value是当前fescar的服务名称, cluster = "default" / application = "default"

\n
     vgroup_mapping.service-provider-fescar-service-group = \"default\"\n      #only support single node\n      localRgroup.grouplist = \"127.0.0.1:8091\"\n
\n

同理无论是provider 还是consumer 都需要这两个文件进行配置。

\n

如果你采用nacos做配置中心,需要在nacos通过添加配置方式进行配置添加。

\n

5.事务使用

\n

我这里的代码逻辑是请求通过网关进行负载转发到我的consumer上,在consumer 中通过feign进行provider请求。官方的例子中是通过feign进行的,而我们这边直接通过网关转发,所以全局事务同官方的demo一样 也都是在controller层。

\n
@RestController\npublic class DemoController {\n\t@Autowired\n\tprivate DemoFeignClient demoFeignClient;\n\t\n\t@Autowired\n\tprivate DemoFeignClient2 demoFeignClient2;\n\t@GlobalTransactional(timeoutMills = 300000, name = \"spring-cloud-demo-tx\")\n\t@GetMapping(\"/getdemo\")\n\tpublic String demo() {\n\t\t\n\t\t// 调用A 服务  简单save\n\t\tResponseData<Integer> result = demoFeignClient.insertService(\"test\",1);\n\t\tif(result.getStatus()==400) {\n\t\t\tSystem.out.println(result+\"+++++++++++++++++++++++++++++++++++++++\");\n\t\t\tthrow new RuntimeException(\"this is error1\");\n\t\t}\n\t\n\t\t// 调用B 服务。报错测试A 服务回滚\n\t\tResponseData<Integer>  result2 = demoFeignClient2.saveService();\n\t\n\t\tif(result2.getStatus()==400) {\n\t\t\tSystem.out.println(result2+\"+++++++++++++++++++++++++++++++++++++++\");\n\t\t\tthrow new RuntimeException(\"this is error2\");\n\t\t}\n\n\t\treturn \"SUCCESS\";\n\t}\n}\n
\n

到此为止核心的事务整合基本到此结束了,我这里是针对A,B 两个provider进行调用,当B发生报错后,进行全局事务回滚。当然每个事务内部都可以通过自己的独立本地事务去处理自己本地事务方式。

\n

SEATA是通过全局的XID方式进行事务统一标识方式。这里就不列出SEATA需要用的数据库表。具体参考:spring-cloud-fescar 官方DEMO

\n

5.数据代理

\n

这里还有一个重要的说明就是,在分库服务的情况下,每一个数据库内都需要有一个undo_log的数据库表进行XID统一存储处理。

\n

同时针对每个提供服务的项目,需要进行数据库连接池的代理。也就是:

\n

目前只支持Druid连接池,后续会继续支持。

\n
@Configuration\npublic class DatabaseConfiguration {\n\n\t\n\t@Bean(destroyMethod = \"close\", initMethod = \"init\")\n\t@ConfigurationProperties(prefix=\"spring.datasource\")\n\tpublic DruidDataSource druidDataSource() {\n\n\t\treturn new DruidDataSource();\n\t}\n\t\n\t\n\t@Bean\n\tpublic DataSourceProxy dataSourceProxy(DruidDataSource druidDataSource) {\n\t\n\t\treturn new DataSourceProxy(druidDataSource);\n\t}\n\t\n\n    @Bean\n    public SqlSessionFactory sqlSessionFactory(DataSourceProxy dataSourceProxy) throws Exception {\n        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();\n        factoryBean.setDataSource(dataSourceProxy);    \n        return factoryBean.getObject();\n    }\n}\n
\n

大家要注意的就是配置文件和数据代理。如果没有进行数据源代理,undo_log是无数据的,也就是没办法进行XID的管理。

\n

本文作者:大菲.Fei

\n", - "link": "/zh-cn/blog/integrate-seata-with-spring-cloud.html", - "meta": { - "title": "Seata(Fescar)分布式事务 整合 Spring Cloud", - "author": "大菲.Fei", - "date": "2019/04/15", - "keywords": "fescar、seata、分布式事务" - } -} \ No newline at end of file diff --git a/zh-cn/blog/manual-transaction-mode.html b/zh-cn/blog/manual-transaction-mode.html deleted file mode 100644 index 51841bfd..00000000 --- a/zh-cn/blog/manual-transaction-mode.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - MT 模式 - - - - -

Manual Transaction 模式

-

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

-
    -
  • 一阶段 prepare 行为
  • -
  • 二阶段 commit 或 rollback 行为
  • -
-

Overview of a global transaction

-

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeManual (Branch) Transaction Mode.

-

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

-
    -
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • -
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • -
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • -
-

相应的,MT 模式,不依赖于底层数据资源的事务支持:

-
    -
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • -
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • -
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • -
-

所谓 MT 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。
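下面用一段假想的 Java 接口来示意 MT 模式下“自定义分支事务”需要提供的两阶段行为(接口名与方法签名均为本文虚构,并非 Fescar 实际 API,仅帮助理解上面的模型):

/**
 * 假想的 MT 分支资源接口:把自定义资源的两阶段行为纳入全局事务的协调。
 * 接口与方法均为示意,实际 API 以官方文档为准。
 */
public interface CustomBranchResource {

    /** 一阶段 prepare:执行自定义的预留/检查逻辑,返回是否成功 */
    boolean prepare(String xid, Object businessParam) throws Exception;

    /** 二阶段 commit:全局提交时被回调,确认一阶段预留的资源 */
    boolean commit(String xid) throws Exception;

    /** 二阶段 rollback:全局回滚时被回调,释放或补偿一阶段预留的资源 */
    boolean rollback(String xid) throws Exception;
}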

-
- - - - - - - diff --git a/zh-cn/blog/manual-transaction-mode.json b/zh-cn/blog/manual-transaction-mode.json deleted file mode 100644 index f5fe2a49..00000000 --- a/zh-cn/blog/manual-transaction-mode.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "manual-transaction-mode.md", - "__html": "

Manual Transaction 模式

\n

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

\n
    \n
  • 一阶段 prepare 行为
  • \n
  • 二阶段 commit 或 rollback 行为
  • \n
\n

\"Overview

\n

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeManual (Branch) Transaction Mode.

\n

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

\n
    \n
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • \n
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • \n
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • \n
\n

相应的,MT 模式,不依赖于底层数据资源的事务支持:

\n
    \n
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • \n
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • \n
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • \n
\n

所谓 MT 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

\n", - "link": "/zh-cn/blog/manual-transaction-mode.html", - "meta": { - "title": "MT 模式", - "keywords": "MT 模式", - "description": "介绍 MT 模式", - "author": "kmmshmily", - "date": "2019-02-13" - } -} \ No newline at end of file diff --git a/zh-cn/blog/quick-start-use-seata-and-dubbo-services.html b/zh-cn/blog/quick-start-use-seata-and-dubbo-services.html deleted file mode 100644 index 41b8e2fc..00000000 --- a/zh-cn/blog/quick-start-use-seata-and-dubbo-services.html +++ /dev/null @@ -1,214 +0,0 @@ - - - - - - - - - - 如何使用Seata保证Dubbo微服务间的一致性 - - - - -

如何使用Seata保证Dubbo微服务间的一致性

-

案例

-

用户采购商品业务,整个业务包含3个微服务:

-
    -
  • 库存服务: 扣减给定商品的库存数量。
  • -
  • 订单服务: 根据采购请求生成订单。
  • -
  • 账户服务: 用户账户金额扣减。
  • -
-

业务结构图

-

Architecture

-

StorageService

-
public interface StorageService {
-
-    /**
-     * deduct storage count
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

OrderService

-
public interface OrderService {
-
-    /**
-     * create order
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

AccountService

-
public interface AccountService {
-
-    /**
-     * debit balance of user's account
-     */
-    void debit(String userId, int money);
-}
-
-

主要的业务逻辑:

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * purchase
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class StorageServiceImpl implements StorageService {
-
-  private StorageDAO storageDAO;
-  
-    @Override
-    public void deduct(String commodityCode, int count) {
-        Storage storage = new Storage();
-        storage.setCount(count);
-        storage.setCommodityCode(commodityCode);
-        storageDAO.update(storage);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        return orderDAO.insert(order);
-    }
-}
-
-

Seata 分布式事务解决方案

-

undefined

-

此处仅仅需要一行注解 @GlobalTransactional 写在业务发起方的方法上:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-

Dubbo 与 Seata 结合的例子

-

Step 1: 安装数据库

-
    -
  • 要求: MySQL (InnoDB 存储引擎)。
  • -
-

提示: 事实上例子中3个微服务需要3个独立的数据库,但为了方便我们使用同一物理库并配置3个逻辑连接串。

-

更改以下xml文件中的数据库url、username和password

-

dubbo-account-service.xml -dubbo-order-service.xml -dubbo-storage-service.xml

-
    <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-    <property name="username" value="xxx" />
-    <property name="password" value="xxx" />
-
-

Step 2: 为 Seata 创建 UNDO_LOG 表

-

UNDO_LOG 此表用于 Seata 的AT模式。

-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  KEY `idx_unionkey` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8
-
-

Step 3: 创建相关业务表

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

Step 4: 启动 Seata-Server 服务

-
    -
  • 下载Server package, 并解压。
  • -
  • 运行bin目录下的启动脚本。
  • -
-
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
-
-e.g.
-
-sh seata-server.sh 8091 /home/admin/seata/data/
-
-

Step 5: 运行例子

- -

相关项目

- -
- - - - - - - diff --git a/zh-cn/blog/quick-start-use-seata-and-dubbo-services.json b/zh-cn/blog/quick-start-use-seata-and-dubbo-services.json deleted file mode 100644 index 35cd40a5..00000000 --- a/zh-cn/blog/quick-start-use-seata-and-dubbo-services.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "quick-start-use-seata-and-dubbo-services.md", - "__html": "

如何使用Seata保证Dubbo微服务间的一致性

\n

案例

\n

用户采购商品业务,整个业务包含3个微服务:

\n
    \n
  • 库存服务: 扣减给定商品的库存数量。
  • \n
  • 订单服务: 根据采购请求生成订单。
  • \n
  • 账户服务: 用户账户金额扣减。
  • \n
\n

业务结构图

\n

\"Architecture\"

\n

StorageService

\n
public interface StorageService {\n\n    /**\n     * deduct storage count\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

OrderService

\n
public interface OrderService {\n\n    /**\n     * create order\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

AccountService

\n
public interface AccountService {\n\n    /**\n     * debit balance of user's account\n     */\n    void debit(String userId, int money);\n}\n
\n

主要的业务逻辑:

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * purchase\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class StorageServiceImpl implements StorageService {\n\n  private StorageDAO storageDAO;\n  \n    @Override\n    public void deduct(String commodityCode, int count) {\n        Storage storage = new Storage();\n        storage.setCount(count);\n        storage.setCommodityCode(commodityCode);\n        storageDAO.update(storage);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        return orderDAO.insert(order);\n    }\n}\n
\n

Seata 分布式事务解决方案

\n

\"undefined\"

\n

此处仅仅需要一行注解 @GlobalTransactional 写在业务发起方的方法上:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

Dubbo 与 Seata 结合的例子

\n

Step 1: 安装数据库

\n
    \n
  • 要求: MySQL (InnoDB 存储引擎)。
  • \n
\n

提示: 事实上例子中3个微服务需要3个独立的数据库,但为了方便我们使用同一物理库并配置3个逻辑连接串。

\n

更改以下xml文件中的数据库url、username和password

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
    <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n    <property name=\"username\" value=\"xxx\" />\n    <property name=\"password\" value=\"xxx\" />\n
\n

Step 2: 为 Seata 创建 UNDO_LOG 表

\n

UNDO_LOG 此表用于 Seata 的AT模式。

\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  KEY `idx_unionkey` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=159 DEFAULT CHARSET=utf8\n
\n

Step 3: 创建相关业务表

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

Step 4: 启动 Seata-Server 服务

\n
    \n
  • 下载Server package, 并解压。
  • \n
  • 运行bin目录下的启动脚本。
  • \n
\n
sh seata-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA\n\ne.g.\n\nsh seata-server.sh 8091 /home/admin/seata/data/\n
\n

Step 5: 运行例子

\n\n

相关项目

\n\n", - "link": "/zh-cn/blog/quick-start-use-seata-and-dubbo-services.html", - "meta": { - "title": "如何使用Seata保证Dubbo微服务间的一致性", - "keywords": "Dubbo,Seata,一致性", - "description": "本文主要介绍如何使用Seata保证Dubbo微服务间的一致性", - "author": "slievrly", - "date": "2019-03-07" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-analysis-go-server.html b/zh-cn/blog/seata-analysis-go-server.html deleted file mode 100644 index 2e443d68..00000000 --- a/zh-cn/blog/seata-analysis-go-server.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - Seata分布式Go Server正式开源-TaaS设计简介 - - - - -

Seata 高可用服务端 TaaS 正式开源

-

前言

-

TaaS 是 Seata 服务端(TC, Transaction Coordinator)的一种高可用实现,使用 Golang 编写,由 InfiniVision (http://infinivision.cn) 贡献给 Seata 开源社区,现已正式开源。

-

在Seata开源之前,我们内部开始借鉴GTS以及一些开源项目来实现分布式事务的解决方案TaaS(Transaction as a Service)。

-

在我们完成TaaS的服务端的开发工作后,Seata(当时还叫Fescar)开源了,并且引起了开源社区的广泛关注,加上阿里巴巴的平台影响力以及社区活跃度,我们认为Seata会成为今后开源分布式事务的标准,我们决定TaaS兼容Seata。

-

我们发现 Seata 的服务端实现是单机的,高可用等并没有实现,于是与 Seata 社区负责人取得联系,决定把 TaaS 开源,回馈开源社区。同时,我们会长期维护,并和 Seata 版本保持同步。

-

目前,Seata官方的Java高可用版本也在开发中,TaaS和该高可用版本的设计思想不同,在今后会长期共存。

-

TaaS已经开源, github (https://github.com/seata/seata-go-server),欢迎大家试用。

-

设计原则

-
    -
  1. 高性能,性能和机器数量成正比,即通过加入新机器到集群中,就可以提升性能
  2. -
  3. 高可用,一台机器出现故障,系统能依旧可以对外提供服务,或者在较短的时间内恢复对外服务(Leader切换的时间)
  4. -
  5. Auto-Rebalance,集群中增加新的机器,或者有机器下线,系统能够自动的做负载均衡
  6. -
  7. 强一致,系统的元数据强一致在多个副本中存储
  8. -
-

设计

-

-

高性能

-

TaaS的性能和机器数量成正比,为了支持这个特性,在TaaS中处理全局事务的最小单元称为Fragment,系统在启动的时候会设定每个Fragment支持的活跃全局事务的并发数,同时系统会对每个Fragment进行采样,一旦发现Fragment超负荷,会生成新的Fragment来处理更多的并发。

-

高可用

-

每个Fragment有多个副本和一个Leader,由Leader来处理请求。当Leader出现故障,系统会产生一个新的Leader来处理请求,在新Leader的选举过程中,这个Fragment对外不提供服务,通常这个间隔时间是几秒钟。

-

强一致

-

TaaS本身不存储全局事务的元数据,元数据存储在Elasticell (https://github.com/deepfabric/elasticell) 中,Elasticell是一个兼容redis协议的分布式的KV存储,它基于Raft协议来保证数据的一致性。

-

Auto-Rebalance

-

随着系统的运行,在系统中会存在许多Fragment以及它们的副本,这样会导致在每个机器上,Fragment的分布不均匀,特别是当旧的机器下线或者新的机器上线的时候。TaaS在启动的时候,会选择3个节点作为调度器的角色,调度器负责调度这些Fragment,用来保证每个机器上的Fragment的数量以及Leader个数大致相等,同时还会保证每个Fragment的副本数维持在指定的副本个数。

-
Fragment副本创建
-

-
    -
  1. t0时间点,Fragment1在Seata-TC1机器上创建
  2. -
  3. t1时间点,Fragment1的副本Fragment1'在Seata-TC2机器上创建
  4. -
  5. t2时间点,Fragment1的副本Fragment1"在Seata-TC3机器上创建
  6. -
-

在t2时间点,Fragment1的三个副本创建完毕。

-
Fragment副本迁移
-

-
    -
  1. t0时刻,系统一共存在4个Fragment,分别存在于Seata-TC1,Seata-TC2,Seata-TC3三台机器上
  2. -
  3. t1时刻,加入新机器Seata-TC4
  4. -
  5. t2时刻,有3个Fragment的副本被迁移到了Seata-TC4这台机器上
  6. -
-

在线快速体验

-

我们在公网搭建了一个体验的环境:

- -

本地快速体验

-

使用docker-compose快速体验TaaS的功能。

-
git clone https://github.com/seata/taas.git
-docker-compose up -d
-
-

由于组件依赖较多,docker-compose 启动约 30 秒后才可以对外提供服务。

-

Seata服务地址

-

服务默认监听在8091端口,修改Seata对应的服务端地址体验

-

Seata UI

-

访问WEB UI http://127.0.0.1:8084/ui/index.html

-

关于InfiniVision

-

深见网络是一家技术驱动的企业级服务提供商,致力于利用人工智能、云计算、区块链、大数据,以及物联网边缘计算技术助力传统企业的数字化转型和升级。深见网络积极拥抱开源文化并将核心算法和架构开源,知名人脸识别软件 InsightFace (https://github.com/deepinsight/insightface) (曾多次获得大规模人脸识别挑战冠军),以及分布式存储引擎 Elasticell (https://github.com/deepfabric/elasticell) 等均是深见网络的开源产品。

-

关于作者

-

作者张旭,开源网关Gateway (https://github.com/fagongzi/gateway) 作者,目前就职于InfiniVision,负责基础架构相关的研发工作。

-
- - - - - - - diff --git a/zh-cn/blog/seata-analysis-go-server.json b/zh-cn/blog/seata-analysis-go-server.json deleted file mode 100644 index 802cea73..00000000 --- a/zh-cn/blog/seata-analysis-go-server.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "seata-analysis-go-server.md", - "__html": "

Seata 高可用服务端 TaaS 正式开源

\n

前言

\n

TaaS 是 Seata 服务端(TC, Transaction Coordinator)的一种高可用实现,使用 Golang 编写。Taas 由InfiniVision (http://infinivision.cn) 贡献给Seata开源社区。现已正式开源,并贡献给 Seata 社区。

\n

在Seata开源之前,我们内部开始借鉴GTS以及一些开源项目来实现分布式事务的解决方案TaaS(Transaction as a Service)。

\n

在我们完成TaaS的服务端的开发工作后,Seata(当时还叫Fescar)开源了,并且引起了开源社区的广泛关注,加上阿里巴巴的平台影响力以及社区活跃度,我们认为Seata会成为今后开源分布式事务的标准,我们决定TaaS兼容Seata。

\n

在发现Seata的服务端的实现是单机的,高可用等并没有实现,于是我们与Seata社区负责人取得联系,并且决定把TaaS开源,回馈开源社区。 同时,我们会长期维护,并且和Seata版本保持同步。

\n

目前,Seata官方的Java高可用版本也在开发中,TaaS和该高可用版本的设计思想不同,在今后会长期共存。

\n

TaaS已经开源, github (https://github.com/seata/seata-go-server),欢迎大家试用。

\n

设计原则

\n
    \n
  1. 高性能,性能和机器数量成正比,即通过加入新机器到集群中,就可以提升性能
  2. \n
  3. 高可用,一台机器出现故障,系统能依旧可以对外提供服务,或者在较短的时间内恢复对外服务(Leader切换的时间)
  4. \n
  5. Auto-Rebalance,集群中增加新的机器,或者有机器下线,系统能够自动的做负载均衡
  6. \n
  7. 强一致,系统的元数据强一致在多个副本中存储
  8. \n
\n

设计

\n

\"\"

\n

高性能

\n

TaaS的性能和机器数量成正比,为了支持这个特性,在TaaS中处理全局事务的最小单元称为Fragment,系统在启动的时候会设定每个Fragment支持的活跃全局事务的并发数,同时系统会对每个Fragment进行采样,一旦发现Fragment超负荷,会生成新的Fragment来处理更多的并发。

\n

高可用

\n

每个Fragment有多个副本和一个Leader,由Leader来处理请求。当Leader出现故障,系统会产生一个新的Leader来处理请求,在新Leader的选举过程中,这个Fragment对外不提供服务,通常这个间隔时间是几秒钟。

\n

强一致

\n

TaaS本身不存储全局事务的元数据,元数据存储在Elasticell (https://github.com/deepfabric/elasticell) 中,Elasticell是一个兼容redis协议的分布式的KV存储,它基于Raft协议来保证数据的一致性。

\n

Auto-Rebalance

\n

随着系统的运行,在系统中会存在许多Fragment以及它们的副本,这样会导致在每个机器上,Fragment的分布不均匀,特别是当旧的机器下线或者新的机器上线的时候。TaaS在启动的时候,会选择3个节点作为调度器的角色,调度器负责调度这些Fragment,用来保证每个机器上的Fragment的数量以及Leader个数大致相等,同时还会保证每个Fragment的副本数维持在指定的副本个数。

\n
Fragment副本创建
\n

\"\"

\n
    \n
  1. t0时间点,Fragment1在Seata-TC1机器上创建
  2. \n
  3. t1时间点,Fragment1的副本Fragment1'在Seata-TC2机器上创建
  4. \n
  5. t2时间点,Fragment1的副本Fragment1"在Seata-TC3机器上创建
  6. \n
\n

在t2时间点,Fragment1的三个副本创建完毕。

\n
Fragment副本迁移
\n

\"\"

\n
    \n
  1. t0时刻点,系统一个存在4个Fragment,分别存在于Seata-TC1,Seata-TC2,Seata-TC3三台机器上
  2. \n
  3. t1时刻,加入新机器Seata-TC4
  4. \n
  5. t2时刻,有3个Fragment的副本被迁移到了Seata-TC4这台机器上
  6. \n
\n

在线快速体验

\n

我们在公网搭建了一个体验的环境:

\n\n

本地快速体验

\n

使用docker-compose快速体验TaaS的功能。

\n
git clone https://github.com/seata/taas.git\ndocker-compose up -d\n
\n

由于组件依赖较多,docker-compose启动30秒后,可以对外服务

\n

Seata服务地址

\n

服务默认监听在8091端口,修改Seata对应的服务端地址体验

\n

Seata UI

\n

访问WEB UI http://127.0.0.1:8084/ui/index.html

\n

关于InfiniVision

\n

深见网络是一家技术驱动的企业级服务提供商,致力于利用人工智能、云计算、区块链、大数据,以及物联网边缘计算技术助力传统企业的数字化转型和升级。深见网络积极拥抱开源文化并将核心算法和架构开源,知名人脸识别软件 InsightFace (https://github.com/deepinsight/insightface) (曾多次获得大规模人脸识别挑战冠军),以及分布式存储引擎 Elasticell (https://github.com/deepfabric/elasticell) 等均是深见网络的开源产品。

\n

关于作者

\n

作者张旭,开源网关Gateway (https://github.com/fagongzi/gateway) 作者,目前就职于InfiniVision,负责基础架构相关的研发工作。

\n", - "link": "/zh-cn/blog/seata-analysis-go-server.html", - "meta": { - "title": "Seata分布式Go Server正式开源-TaaS设计简介", - "author": "fagongzi(zhangxu19830126@gmail.com)", - "date": "2019/04/23", - "keywords": "seata、分布式事务、高可用" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-analysis-java-client.html b/zh-cn/blog/seata-analysis-java-client.html deleted file mode 100644 index f7f26690..00000000 --- a/zh-cn/blog/seata-analysis-java-client.html +++ /dev/null @@ -1,682 +0,0 @@ - - - - - - - - - - 分布式事务之Seata-Client原理及流程详解 - - - - -

前言

-

在分布式系统中,分布式事务是一个必须要解决的问题,目前使用较多的是最终一致性方案。自年初阿里开源了Fescar(四月初更名为Seata)后,该项目受到了极大的关注度,目前已接近8000Star。Seata以高性能和零侵入的方式为目标解决微服务领域的分布式事务难题,目前正处于快速迭代中,近期小目标是生产可用的Mysql版本。关于Seata的总体介绍,可以查看官方WIKI获得更多更全面的内容介绍。

-

本文主要基于spring cloud+spring jpa+spring cloud alibaba fescar+mysql+seata的结构,搭建一个分布式系统的demo,通过seata的debug日志和源代码,从client端(RM、TM)的角度分析说明其工作流程及原理。

-

文中代码基于fescar-0.4.1,由于项目刚更名为seata不久,例如一些包名、类名、jar包名称还都是fescar的命名,故下文中仍使用fescar进行表述。

-

示例项目:https://github.com/fescar-group/fescar-samples/tree/master/springcloud-jpa-seata

-

相关概念

-
    -
  • XID:全局事务的唯一标识,由ip:port:sequence组成
  • -
  • Transaction Coordinator (TC):事务协调器,维护全局事务的运行状态,负责协调并驱动全局事务的提交或回滚
  • -
  • Transaction Manager (TM ):控制全局事务的边界,负责开启一个全局事务,并最终发起全局提交或全局回滚的决议
  • -
  • Resource Manager (RM):控制分支事务,负责分支注册、状态汇报,并接收事务协调器的指令,驱动分支(本地)事务的提交和回滚
  • -
-

分布式框架支持

-

Fescar使用XID表示一个分布式事务,XID需要在一次分布式事务请求所涉及的系统中进行传递,从而向fescar-server汇报分支事务的处理情况,以及接收fescar-server的commit、rollback指令。 -Fescar官方已支持全版本的dubbo协议;而对于spring cloud(spring-boot)的分布式项目,社区也提供了相应的实现

-
<dependency>
-    <groupId>org.springframework.cloud</groupId>
-    <artifactId>spring-cloud-alibaba-fescar</artifactId>
-    <version>2.1.0.BUILD-SNAPSHOT</version>
-</dependency>
-
-

该组件实现了基于RestTemplate、Feign通信时的XID传递功能。

-

Business logic

-

The business logic is the classic place-order / debit-balance / deduct-stock flow. It is split by module into three independent services, each connected to its own database

-
  • Order: order-server
  • Account: account-server
  • Stock: storage-server

In addition there is the business system that initiates the distributed transaction

-
  • Business: business-server

The project structure is shown in the figure below.

-

Normal flow

-
  1. business initiates a purchase request
  2. storage deducts the stock
  3. order creates the order
  4. account debits the balance

Exception flow

-
  1. business initiates a purchase request
  2. storage deducts the stock
  3. order creates the order
  4. account fails with an exception while debiting the balance

In the normal flow the data updated in steps 2, 3 and 4 is committed globally; in the exception flow, the error in step 4 causes all of it to be rolled back globally.

-

Configuration files

-

fescar's configuration entry file is registry.conf. The ConfigurationFactory code shows that this file cannot currently be specified explicitly, so the configuration file name must be registry.conf

-
private static final String REGISTRY_CONF = "registry.conf";
-public static final Configuration FILE_INSTANCE = new FileConfiguration(REGISTRY_CONF);
-
-

registry.conf specifies where the concrete configuration comes from; the default is the file type, and file.conf contains three groups of settings

-
  1. transport
     The transport settings map to the NettyServerConfig class and define the Netty-related parameters; TM and RM communicate with fescar-server over Netty
  2. service
-
	 service {
-	  #vgroup->rgroup
-	  vgroup_mapping.my_test_tx_group = "default"
-	  #address used by the client to connect to the TC
-	  default.grouplist = "127.0.0.1:8091"
-	  #degrade current not support
-	  enableDegrade = false
-	  #disable: whether to disable Seata's distributed transactions
-	  disableGlobalTransaction = false
-	}
-
-
  3. client
-
	client {
-	  # upper limit of the RM's buffer for commit notifications received from the TC
-	  async.commit.buffer.limit = 10000
-	  lock {
-	    retry.internal = 10
-	    retry.times = 30
-	  }
-	}
-
-

DataSource proxy

-

Apart from the configuration files above, the only place where fescar's AT mode needs a little code is specifying the proxy for the data source, and currently only a DruidDataSource can be proxied. (Note: the newly released 0.4.2 already supports arbitrary data source types.)

-
@Bean
-@ConfigurationProperties(prefix = "spring.datasource")
-public DruidDataSource druidDataSource() {
-    DruidDataSource druidDataSource = new DruidDataSource();
-    return druidDataSource;
-}
-
-@Primary
-@Bean("dataSource")
-public DataSourceProxy dataSource(DruidDataSource druidDataSource) {
-    return new DataSourceProxy(druidDataSource);
-}
-
-

The purpose of using DataSourceProxy is to bring in ConnectionProxy. One aspect of fescar's non-intrusiveness shows in the ConnectionProxy implementation: the branch transaction joins the global transaction at the commit point of the local transaction, a design that guarantees the business data and the undo_log are written in the same local transaction.

-

undo_log is a table that must be created in each business database. fescar relies on it to record the status of every branch transaction and the data needed to replay a phase-two rollback. There is no need to worry about the table growing into a bottleneck: when a global transaction commits, the corresponding undo_log rows are deleted asynchronously.

-
CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

Starting the server

-

Go to https://github.com/seata/seata/releases and download the fescar-server release that matches your client version, to avoid protocol mismatches caused by differing versions. Enter the bin directory of the unpacked archive and run

-
./fescar-server.sh 8091 ../data
-
-

Output on a successful start

-
2019-04-09 20:27:24.637 INFO [main]c.a.fescar.core.rpc.netty.AbstractRpcRemotingServer.start:152 -Server started ... 
-
-

Starting the client

-

fescar's loading entry point is GlobalTransactionAutoConfiguration, which is picked up automatically in Spring Boot projects; GlobalTransactionScanner can of course also be instantiated in other ways (a minimal manual-wiring sketch follows the auto-configuration code below)

-
@Configuration
-@EnableConfigurationProperties({FescarProperties.class})
-public class GlobalTransactionAutoConfiguration {
-    private final ApplicationContext applicationContext;
-    private final FescarProperties fescarProperties;
-
-    public GlobalTransactionAutoConfiguration(ApplicationContext applicationContext, FescarProperties fescarProperties) {
-        this.applicationContext = applicationContext;
-        this.fescarProperties = fescarProperties;
-    }
-
-	/**
-	* instantiate GlobalTransactionScanner
-	* the scanner is the class that kicks off client initialization
-	*/
-    @Bean
-    public GlobalTransactionScanner globalTransactionScanner() {
-        String applicationName = this.applicationContext.getEnvironment().getProperty("spring.application.name");
-        String txServiceGroup = this.fescarProperties.getTxServiceGroup();
-        if (StringUtils.isEmpty(txServiceGroup)) {
-            txServiceGroup = applicationName + "-fescar-service-group";
-            this.fescarProperties.setTxServiceGroup(txServiceGroup);
-        }
-		
-        return new GlobalTransactionScanner(applicationName, txServiceGroup);
-    }
-}
-
-
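As mentioned above, the scanner can also be wired up manually when the auto-configuration is not used. A minimal sketch, assuming the fescar 0.4.x package name for GlobalTransactionScanner and using placeholder values that must match your own application name and transaction group:

import com.alibaba.fescar.spring.annotation.GlobalTransactionScanner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Manual wiring sketch; the constructor arguments (application name and
// tx service group) are placeholders and must match your setup.
@Configuration
public class ManualFescarConfig {

    @Bean
    public GlobalTransactionScanner globalTransactionScanner() {
        return new GlobalTransactionScanner("business-service", "my_test_tx_group");
    }
}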

As you can see, one configuration holder, FescarProperties, is supported; it is used to configure the transaction group name

-
spring.cloud.alibaba.fescar.tx-service-group=my_test_tx_group
-
-

If no transaction service group is specified, the default name is generated as spring.application.name + "-fescar-service-group", so starting without spring.application.name set will fail with an error

-
@ConfigurationProperties("spring.cloud.alibaba.fescar")
-public class FescarProperties {
-    private String txServiceGroup;
-
-    public FescarProperties() {
-    }
-
-    public String getTxServiceGroup() {
-        return this.txServiceGroup;
-    }
-
-    public void setTxServiceGroup(String txServiceGroup) {
-        this.txServiceGroup = txServiceGroup;
-    }
-}
-
-

After obtaining applicationId and txServiceGroup, a GlobalTransactionScanner object is created; the interesting part is its initClient method

-
private void initClient() {
-    if (StringUtils.isNullOrEmpty(applicationId) || StringUtils.isNullOrEmpty(txServiceGroup)) {
-        throw new IllegalArgumentException(
-            "applicationId: " + applicationId + ", txServiceGroup: " + txServiceGroup);
-    }
-    //init TM
-    TMClient.init(applicationId, txServiceGroup);
-
-    //init RM
-    RMClient.init(applicationId, txServiceGroup);
-  
-}
-
-

The method initializes TMClient and RMClient. A single service can act as both a TM and an RM; which one it is in a given global transaction depends on where the @GlobalTransactional annotation sits. Creating the clients results in a Netty connection to the TC for each role, so the startup log shows two Netty channels whose transactionRole is TMROLE and RMROLE respectively

-
2019-04-09 13:42:57.417  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {"address":"127.0.0.1:8091","message":{"applicationId":"business-service","byteBuffer":{"char":"\u0000","direct":false,"double":0.0,"float":0.0,"int":0,"long":0,"readOnly":false,"short":0},"transactionServiceGroup":"my_test_tx_group","typeCode":101,"version":"0.4.1"},"transactionRole":"TMROLE"}
-2019-04-09 13:42:57.505  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {"address":"127.0.0.1:8091","message":{"applicationId":"business-service","byteBuffer":{"char":"\u0000","direct":false,"double":0.0,"float":0.0,"int":0,"long":0,"readOnly":false,"short":0},"transactionServiceGroup":"my_test_tx_group","typeCode":103,"version":"0.4.1"},"transactionRole":"RMROLE"}
-2019-04-09 13:42:57.629 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterTMRequest{applicationId='business-service', transactionServiceGroup='my_test_tx_group'}
-2019-04-09 13:42:57.629 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterRMRequest{resourceIds='null', applicationId='business-service', transactionServiceGroup='my_test_tx_group'}
-2019-04-09 13:42:57.699 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:1
-2019-04-09 13:42:57.699 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:2
-2019-04-09 13:42:57.701 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@3b06d101 msgId:1, future :com.alibaba.fescar.core.protocol.MessageFuture@28bb1abd, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null
-2019-04-09 13:42:57.701 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.TmRpcClient@65fc3fb7 msgId:2, future :com.alibaba.fescar.core.protocol.MessageFuture@9a1e3df, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null
-2019-04-09 13:42:57.710  INFO 93715 --- [imeoutChecker_1] c.a.fescar.core.rpc.netty.RmRpcClient    : register RM success. server version:0.4.1,channel:[id: 0xe6468995, L:/127.0.0.1:57397 - R:/127.0.0.1:8091]
-2019-04-09 13:42:57.710  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 114 ms, version:0.4.1,role:TMROLE,channel:[id: 0xd22fe0c5, L:/127.0.0.1:57398 - R:/127.0.0.1:8091]
-2019-04-09 13:42:57.711  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 125 ms, version:0.4.1,role:RMROLE,channel:[id: 0xe6468995, L:/127.0.0.1:57397 - R:/127.0.0.1:8091]
-
-
-

The log shows

-
  1. the Netty connections being created
  2. the registration requests being sent
  3. the responses being received
  4. RmRpcClient and TmRpcClient being instantiated successfully

TM processing flow

-

In this example the TM role is played by business-service; the purchase method of BusinessService is annotated with @GlobalTransactional

-
@Service
-public class BusinessService {
-
-    @Autowired
-    private StorageFeignClient storageFeignClient;
-    @Autowired
-    private OrderFeignClient orderFeignClient;
-
-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount){
-        storageFeignClient.deduct(commodityCode, orderCount);
-
-        orderFeignClient.create(userId, commodityCode, orderCount);
-    }
-}
-
-

Calling this method creates a global transaction. Let's first look at what the @GlobalTransactional annotation does: the call is intercepted and handled in GlobalTransactionalInterceptor

-
/**
- * AOP interception of the method call
- */
-@Override
-public Object invoke(final MethodInvocation methodInvocation) throws Throwable {
-    Class<?> targetClass = (methodInvocation.getThis() != null ? AopUtils.getTargetClass(methodInvocation.getThis()) : null);
-    Method specificMethod = ClassUtils.getMostSpecificMethod(methodInvocation.getMethod(), targetClass);
-    final Method method = BridgeMethodResolver.findBridgedMethod(specificMethod);
-
-	//get the GlobalTransactional annotation on the method
-    final GlobalTransactional globalTransactionalAnnotation = getAnnotation(method, GlobalTransactional.class);
-    final GlobalLock globalLockAnnotation = getAnnotation(method, GlobalLock.class);
-    
-    //if the method carries the GlobalTransactional annotation, intercept and handle it accordingly
-    if (globalTransactionalAnnotation != null) {
-        return handleGlobalTransaction(methodInvocation, globalTransactionalAnnotation);
-    } else if (globalLockAnnotation != null) {
-        return handleGlobalLock(methodInvocation);
-    } else {
-        return methodInvocation.proceed();
-    }
-}
-
-

handleGlobalTransaction calls TransactionalTemplate's execute method. As the class name suggests, this is a standard template method that defines the TM's standard steps for handling a global transaction; the inline comments are already fairly clear

-
public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {
-    // 1. get or create a transaction
-    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-
-    try {
-        // 2. begin transaction
-        try {
-            triggerBeforeBegin();
-            tx.begin(business.timeout(), business.name());
-            triggerAfterBegin();
-        } catch (TransactionException txe) {
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.BeginFailure);
-        }
-        Object rs = null;
-        try {
-            // Do Your Business
-            rs = business.execute();
-        } catch (Throwable ex) {
-            // 3. any business exception, rollback.
-            try {
-                triggerBeforeRollback();
-                tx.rollback();
-                triggerAfterRollback();
-                // 3.1 Successfully rolled back
-                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);
-            } catch (TransactionException txe) {
-                // 3.2 Failed to rollback
-                throw new TransactionalExecutor.ExecutionException(tx, txe,
-                    TransactionalExecutor.Code.RollbackFailure, ex);
-            }
-        }
-        // 4. everything is fine, commit.
-        try {
-            triggerBeforeCommit();
-            tx.commit();
-            triggerAfterCommit();
-        } catch (TransactionException txe) {
-            // 4.1 Failed to commit
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.CommitFailure);
-        }
-        return rs;
-    } finally {
-        //5. clear
-        triggerAfterCompletion();
-        cleanUp();
-    }
-}
-
-

The global transaction is opened via DefaultGlobalTransaction's begin method

-
public void begin(int timeout, String name) throws TransactionException {
-    if (role != GlobalTransactionRole.Launcher) {
-        check();
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("Ignore Begin(): just involved in global transaction [" + xid + "]");
-        }
-        return;
-    }
-    if (xid != null) {
-        throw new IllegalStateException();
-    }
-    if (RootContext.getXID() != null) {
-        throw new IllegalStateException();
-    }
-    //the call that actually opens the transaction and obtains the XID returned by the TC
-    xid = transactionManager.begin(null, null, name, timeout);
-    status = GlobalStatus.Begin;
-    RootContext.bind(xid);
-    if (LOGGER.isDebugEnabled()) {
-        LOGGER.debug("Begin a NEW global transaction [" + xid + "]");
-    }
-}
-
-

The check if (role != GlobalTransactionRole.Launcher) at the top of the method plays a key role: it indicates whether the current caller is the initiator (Launcher) of the global transaction or a participant (Participant). If a downstream method in the distributed transaction is also annotated with @GlobalTransactional, its role is Participant, and it skips the rest of begin and returns immediately. Whether a caller is the Launcher or a Participant is decided by whether an XID already exists in the current context: no XID means Launcher, an existing XID means Participant. It follows that only the Launcher can create the global transaction, and a distributed transaction has exactly one Launcher.
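Condensed into a sketch, the role decision described above is nothing more than a check on the current context; this is an illustration of the idea, not the literal fescar source.

import com.alibaba.fescar.core.context.RootContext;

// Illustration of the Launcher/Participant decision: no XID bound to the current
// context means this caller starts the global transaction, an existing XID means
// it only participates in one that is already running.
class RoleSketch {
    enum GlobalTransactionRole { Launcher, Participant }

    static GlobalTransactionRole currentRole() {
        return RootContext.getXID() == null
                ? GlobalTransactionRole.Launcher
                : GlobalTransactionRole.Participant;
    }
}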

-

DefaultTransactionManager handles the TM's communication with the TC, sending the begin, commit and rollback instructions

-
@Override
-public String begin(String applicationId, String transactionServiceGroup, String name, int timeout)
-    throws TransactionException {
-    GlobalBeginRequest request = new GlobalBeginRequest();
-    request.setTransactionName(name);
-    request.setTimeout(timeout);
-    GlobalBeginResponse response = (GlobalBeginResponse)syncCall(request);
-    return response.getXid();
-}
-
-

At this point, receiving the XID returned by fescar-server means a global transaction has been created successfully, and the log reflects the flow described above

-
2019-04-09 13:46:57.417 DEBUG 31326 --- [nio-8084-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int)
-2019-04-09 13:46:57.417 DEBUG 31326 --- [geSend_TMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int), channel:[id: 0xa148545e, L:/127.0.0.1:56120 - R:/127.0.0.1:8091],active?true,writable?true,isopen?true
-2019-04-09 13:46:57.418 DEBUG 31326 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int)
-2019-04-09 13:46:57.421 DEBUG 31326 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage com.alibaba.fescar.core.protocol.transaction.GlobalBeginResponse@2dc480dc,messageId:1196
-2019-04-09 13:46:57.421 DEBUG 31326 --- [nio-8084-exec-1] c.a.fescar.core.context.RootContext      : bind 192.168.224.93:8091:2008502699
-2019-04-09 13:46:57.421 DEBUG 31326 --- [nio-8084-exec-1] c.a.f.tm.api.DefaultGlobalTransaction    : Begin a NEW global transaction [192.168.224.93:8091:2008502699]
-
-

Once the global transaction has been created, business.execute() runs, i.e. the business code storageFeignClient.deduct(commodityCode, orderCount), and we enter the RM processing flow; the business logic here calls the stock deduction interface of storage-service.

-

RM processing flow

-
@GetMapping(path = "/deduct")
-public Boolean deduct(String commodityCode, Integer count){
-    storageService.deduct(commodityCode,count);
-    return true;
-}
-
-@Transactional
-public void deduct(String commodityCode, int count){
-    Storage storage = storageDAO.findByCommodityCode(commodityCode);
-    storage.setCount(storage.getCount()-count);
-
-    storageDAO.save(storage);
-}
-
-

Neither the storage interface nor its service method contains any fescar-related code or annotations, which shows fescar's non-intrusiveness. So how does it join this global transaction? The answer is in ConnectionProxy, which is also why DataSourceProxy must be used: only through DataSourceProxy can fescar hook into the commit of the business code's local transaction, register the branch transaction with the TC at that point and report the RM's result.

-

Because the business code's own transaction commit is proxied by ConnectionProxy, committing the local transaction actually executes ConnectionProxy's commit method

-
public void commit() throws SQLException {
-	//if we are in a global transaction, run the global-transaction commit path
-	//whether we are in a global transaction is determined by whether an XID exists in the current context
-    if (context.inGlobalTransaction()) {
-        processGlobalTransactionCommit();
-    } else if (context.isGlobalLockRequire()) {
-        processLocalCommitWithGlobalLocks();
-    } else {
-        targetConnection.commit();
-    }
-}
-    
-private void processGlobalTransactionCommit() throws SQLException {
-    try {
-    	//first register the branch with the TC and obtain the branchId it assigns
-        register();
-    } catch (TransactionException e) {
-        recognizeLockKeyConflictException(e);
-    }
-
-    try {
-        if (context.hasUndoLog()) {
-        	//write the undo log
-            UndoLogManager.flushUndoLogs(this);
-        }
-
-		//commit the local transaction; the undo_log and the business data are written in the same local transaction
-        targetConnection.commit();
-    } catch (Throwable ex) {
-    	//notify the TC that the RM failed to process this branch
-        report(false);
-        if (ex instanceof SQLException) {
-            throw new SQLException(ex);
-        }
-    }
-	//notify the TC that the RM processed this branch successfully
-    report(true);
-    context.reset();
-}
-    
-private void register() throws TransactionException {
-	//register the branch: build the request and send the registration instruction to the TC over netty
-    Long branchId = DefaultResourceManager.get().branchRegister(BranchType.AT, getDataSourceProxy().getResourceId(),
-            null, context.getXid(), null, context.buildLockKeys());
-    //store the returned branchId in the context
-    context.setBranchId(branchId);
-}
-
-

The log confirms the flow above

-
2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : xid in RootContext null xid in RpcContext 192.168.0.2:8091:2008546211
-2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] c.a.fescar.core.context.RootContext      : bind 192.168.0.2:8091:2008546211
-2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : bind 192.168.0.2:8091:2008546211 to RootContext
-2019-04-09 21:57:48.386  INFO 38933 --- [nio-8081-exec-1] o.h.h.i.QueryTranslatorFactoryInitiator  : HHH000397: Using ASTQueryTranslatorFactory
-Hibernate: select storage0_.id as id1_0_, storage0_.commodity_code as commodit2_0_, storage0_.count as count3_0_ from storage_tbl storage0_ where storage0_.commodity_code=?
-Hibernate: update storage_tbl set count=? where id=?
-2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : will connect to 192.168.0.2:8091
-2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : RM will register :jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false
-2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {"address":"192.168.0.2:8091","message":{"applicationId":"storage-service","byteBuffer":{"char":"\u0000","direct":false,"double":0.0,"float":0.0,"int":0,"long":0,"readOnly":false,"short":0},"resourceIds":"jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false","transactionServiceGroup":"hello-service-fescar-service-group","typeCode":103,"version":"0.4.0"},"transactionRole":"RMROLE"}
-2019-04-09 21:57:48.677 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterRMRequest{resourceIds='jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false', applicationId='storage-service', transactionServiceGroup='hello-service-fescar-service-group'}
-2019-04-09 21:57:48.680 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:9
-2019-04-09 21:57:48.680 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@7d61f5d4 msgId:9, future :com.alibaba.fescar.core.protocol.MessageFuture@186cd3e0, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null
-2019-04-09 21:57:48.680  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : register RM success. server version:0.4.1,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]
-2019-04-09 21:57:48.680  INFO 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 3 ms, version:0.4.1,role:RMROLE,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]
-2019-04-09 21:57:48.680 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1
-2019-04-09 21:57:48.681 DEBUG 38933 --- [geSend_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1, channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091],active?true,writable?true,isopen?true
-2019-04-09 21:57:48.681 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1
-2019-04-09 21:57:48.687 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage BranchRegisterResponse: transactionId=2008546211,branchId=2008546212,result code =Success,getMsg =null,messageId:11
-2019-04-09 21:57:48.702 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.rm.datasource.undo.UndoLogManager  : Flushing UNDO LOG: {"branchId":2008546212,"sqlUndoLogs":[{"afterImage":{"rows":[{"fields":[{"keyType":"PrimaryKey","name":"id","type":4,"value":1},{"keyType":"NULL","name":"count","type":4,"value":993}]}],"tableName":"storage_tbl"},"beforeImage":{"rows":[{"fields":[{"keyType":"PrimaryKey","name":"id","type":4,"value":1},{"keyType":"NULL","name":"count","type":4,"value":994}]}],"tableName":"storage_tbl"},"sqlType":"UPDATE","tableName":"storage_tbl"}],"xid":"192.168.0.2:8091:2008546211"}
-2019-04-09 21:57:48.755 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null
-2019-04-09 21:57:48.755 DEBUG 38933 --- [geSend_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null, channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091],active?true,writable?true,isopen?true
-2019-04-09 21:57:48.756 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null
-2019-04-09 21:57:48.758 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage com.alibaba.fescar.core.protocol.transaction.BranchReportResponse@582a08cf,messageId:13
-2019-04-09 21:57:48.799 DEBUG 38933 --- [nio-8081-exec-1] c.a.fescar.core.context.RootContext      : unbind 192.168.0.2:8091:2008546211
-2019-04-09 21:57:48.799 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : unbind 192.168.0.2:8091:2008546211 from RootContext
-
-
  1. get the XID passed in from business-service
  2. bind the XID to the current context
  3. execute the business SQL
  4. create this RM's Netty connection to the TC
  5. send the branch transaction's information to the TC
  6. receive the branchId returned by the TC
  7. write the undo log data
  8. report the phase-one result of this branch to the TC
  9. unbind the XID from the current context

Steps 1 and 9 are done in FescarHandlerInterceptor. That class is not part of fescar itself but of the spring-cloud-alibaba-fescar module mentioned earlier; it binds the xid to, and unbinds it from, the current request context for feign/rest communication. At this point the RM has finished its phase-one work; next, the phase-two handling.
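Its behaviour can be pictured with a stripped-down HandlerInterceptor: bind the propagated XID before the controller runs and unbind it once the request completes. This is a simplified sketch relying on Spring 5's default interceptor methods, not the actual FescarHandlerInterceptor source, and the header name is an assumption.

import com.alibaba.fescar.core.context.RootContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.HandlerInterceptor;

// Simplified sketch of the bind/unbind behaviour described above;
// the header name is hypothetical and used only for illustration.
public class XidBindingInterceptorSketch implements HandlerInterceptor {

    private static final String XID_HEADER = "FESCAR_XID"; // assumed header name

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        String xid = request.getHeader(XID_HEADER);
        if (RootContext.getXID() == null && xid != null) {
            RootContext.bind(xid); // step 1: bind the propagated XID to this thread
        }
        return true;
    }

    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) {
        if (RootContext.getXID() != null) {
            RootContext.unbind(); // step 9: release the XID once the request is done
        }
    }
}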

-

Transaction commit

-

After all branch transactions have finished, the TC aggregates the results reported by the RMs and sends each RM a commit or rollback instruction

-
2019-04-09 21:57:49.813 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null,messageId:1
-2019-04-09 21:57:49.813 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@7d61f5d4 msgId:1, body:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null
-2019-04-09 21:57:49.814  INFO 38933 --- [atch_RMROLE_1_8] c.a.f.core.rpc.netty.RmMessageListener   : onMessage:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null
-2019-04-09 21:57:49.816  INFO 38933 --- [atch_RMROLE_1_8] com.alibaba.fescar.rm.AbstractRMHandler  : Branch committing: 192.168.0.2:8091:2008546211 2008546212 jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false null
-2019-04-09 21:57:49.816  INFO 38933 --- [atch_RMROLE_1_8] com.alibaba.fescar.rm.AbstractRMHandler  : Branch commit result: PhaseTwo_Committed
-2019-04-09 21:57:49.817  INFO 38933 --- [atch_RMROLE_1_8] c.a.fescar.core.rpc.netty.RmRpcClient    : RmRpcClient sendResponse branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null
-2019-04-09 21:57:49.817 DEBUG 38933 --- [atch_RMROLE_1_8] c.a.f.c.rpc.netty.AbstractRpcRemoting    : send response:branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]
-2019-04-09 21:57:49.817 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null
-
-

The log shows

-
  1. the RM receives the commit notification for XID=192.168.0.2:8091:2008546211, branchId=2008546212
  2. it performs the commit
  3. it sends the commit result to the TC, with branchStatus PhaseTwo_Committed

Let's look at the phase-two commit in detail, in the doBranchCommit method of AbstractRMHandler

-
/**
- * take the key parameters (xid, branchId, ...) from the notification,
- * then call the RM's branchCommit
- */
-protected void doBranchCommit(BranchCommitRequest request, BranchCommitResponse response) throws TransactionException {
-    String xid = request.getXid();
-    long branchId = request.getBranchId();
-    String resourceId = request.getResourceId();
-    String applicationData = request.getApplicationData();
-    LOGGER.info("Branch committing: " + xid + " " + branchId + " " + resourceId + " " + applicationData);
-    BranchStatus status = getResourceManager().branchCommit(request.getBranchType(), xid, branchId, resourceId, applicationData);
-    response.setBranchStatus(status);
-    LOGGER.info("Branch commit result: " + status);
-}
-
-

Eventually the branchCommit request is routed to AsyncWorker's branchCommit method. The way AsyncWorker handles it is a key part of the fescar architecture: since most transactions commit normally, the work is essentially finished in phase one, which lets locks be released as early as possible. After receiving the commit instruction in phase two, the RM only needs to process it asynchronously, keeping the phase-two cost out of the distributed transaction's latency.

-
private static final List<Phase2Context> ASYNC_COMMIT_BUFFER = Collections.synchronizedList( new ArrayList<Phase2Context>());
-        
-/**
- * add the XID that needs to be committed to the list
- */
-@Override
-public BranchStatus branchCommit(BranchType branchType, String xid, long branchId, String resourceId, String applicationData) throws TransactionException {
-    if (ASYNC_COMMIT_BUFFER.size() < ASYNC_COMMIT_BUFFER_LIMIT) {
-        ASYNC_COMMIT_BUFFER.add(new Phase2Context(branchType, xid, branchId, resourceId, applicationData));
-    } else {
-        LOGGER.warn("Async commit buffer is FULL. Rejected branch [" + branchId + "/" + xid + "] will be handled by housekeeping later.");
-    }
-    return BranchStatus.PhaseTwo_Committed;
-}
-	
-/**
- * consume the XIDs in the list via a scheduled task
- */
-public synchronized void init() {
-    LOGGER.info("Async Commit Buffer Limit: " + ASYNC_COMMIT_BUFFER_LIMIT);
-    timerExecutor = new ScheduledThreadPoolExecutor(1,
-        new NamedThreadFactory("AsyncWorker", 1, true));
-    timerExecutor.scheduleAtFixedRate(new Runnable() {
-        @Override
-        public void run() {
-            try {
-                doBranchCommits();
-            } catch (Throwable e) {
-                LOGGER.info("Failed at async committing ... " + e.getMessage());
-            }
-        }
-    }, 10, 1000 * 1, TimeUnit.MILLISECONDS);
-}
-	
-private void doBranchCommits() {
-    if (ASYNC_COMMIT_BUFFER.size() == 0) {
-        return;
-    }
-    Map<String, List<Phase2Context>> mappedContexts = new HashMap<>();
-    Iterator<Phase2Context> iterator = ASYNC_COMMIT_BUFFER.iterator();
-    
-    //one scheduled pass drains all pending entries from ASYNC_COMMIT_BUFFER
-    //the pending commits are grouped by resourceId, which is a database connection url
-    //as the earlier logs show, this covers applications that create multiple data sources
-    while (iterator.hasNext()) {
-        Phase2Context commitContext = iterator.next();
-        List<Phase2Context> contextsGroupedByResourceId = mappedContexts.get(commitContext.resourceId);
-        if (contextsGroupedByResourceId == null) {
-            contextsGroupedByResourceId = new ArrayList<>();
-            mappedContexts.put(commitContext.resourceId, contextsGroupedByResourceId);
-        }
-        contextsGroupedByResourceId.add(commitContext);
-
-        iterator.remove();
-
-    }
-
-    for (Map.Entry<String, List<Phase2Context>> entry : mappedContexts.entrySet()) {
-        Connection conn = null;
-        try {
-            try {
-            	//look up the data source and obtain a connection by resourceId
-                DataSourceProxy dataSourceProxy = DataSourceManager.get().get(entry.getKey());
-                conn = dataSourceProxy.getPlainConnection();
-            } catch (SQLException sqle) {
-                LOGGER.warn("Failed to get connection for async committing on " + entry.getKey(), sqle);
-                continue;
-            }
-            List<Phase2Context> contextsGroupedByResourceId = entry.getValue();
-            for (Phase2Context commitContext : contextsGroupedByResourceId) {
-                try {
-                	//process the undo log, i.e. delete the record for this xid and branchId
-                    UndoLogManager.deleteUndoLog(commitContext.xid, commitContext.branchId, conn);
-                } catch (Exception ex) {
-                    LOGGER.warn(
-                        "Failed to delete undo log [" + commitContext.branchId + "/" + commitContext.xid + "]", ex);
-                }
-            }
-
-        } finally {
-            if (conn != null) {
-                try {
-                    conn.close();
-                } catch (SQLException closeEx) {
-                    LOGGER.warn("Failed to close JDBC resource while deleting undo_log ", closeEx);
-                }
-            }
-        }
-    }
-}
-
-

So for the commit action, all the RM has to do is delete the undo_log rows that match the xid and branchId.
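Mapped onto the undo_log table shown earlier, that clean-up is essentially one DELETE keyed by xid and branch_id. The snippet below is a plain-JDBC sketch of the idea, not the actual UndoLogManager.deleteUndoLog implementation.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

// Plain-JDBC sketch of the phase-two commit clean-up: remove the undo_log row
// that was written together with the business data in phase one.
class UndoLogCleanupSketch {
    static void deleteUndoLog(Connection conn, String xid, long branchId) throws SQLException {
        try (PreparedStatement ps =
                     conn.prepareStatement("DELETE FROM undo_log WHERE xid = ? AND branch_id = ?")) {
            ps.setString(1, xid);
            ps.setLong(2, branchId);
            ps.executeUpdate();
        }
    }
}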

-

Transaction rollback

-

A rollback is triggered in two situations

-
  1. a branch transaction fails, i.e. the report(false) case in ConnectionProxy
  2. the TM catches an exception thrown up from a downstream system, i.e. an exception caught in the @GlobalTransactional-annotated method that initiated the global transaction. In the execute template method of TransactionalTemplate shown earlier, the call to business.execute() is wrapped in a catch; on an exception, rollback is called and the TM tells the TC that the corresponding XID must be rolled back
-
 public void rollback() throws TransactionException {
-    //only the Launcher can initiate this rollback
-    if (role == GlobalTransactionRole.Participant) {
-        // Participant has no responsibility of committing
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("Ignore Rollback(): just involved in global transaction [" + xid + "]");
-        }
-        return;
-    }
-    if (xid == null) {
-        throw new IllegalStateException();
-    }
-
-    status = transactionManager.rollback(xid);
-    if (RootContext.getXID() != null) {
-        if (xid.equals(RootContext.getXID())) {
-            RootContext.unbind();
-        }
-    }
-}
-
-

After aggregating, the TC sends the rollback instruction to the participants; the RM receives this rollback notification in the doBranchRollback method of AbstractRMHandler

-
protected void doBranchRollback(BranchRollbackRequest request, BranchRollbackResponse response) throws TransactionException {
-    String xid = request.getXid();
-    long branchId = request.getBranchId();
-    String resourceId = request.getResourceId();
-    String applicationData = request.getApplicationData();
-    LOGGER.info("Branch rolling back: " + xid + " " + branchId + " " + resourceId);
-    BranchStatus status = getResourceManager().branchRollback(request.getBranchType(), xid, branchId, resourceId, applicationData);
-    response.setBranchStatus(status);
-    LOGGER.info("Branch rollback result: " + status);
-}
-
-

The rollback request is then passed on to the branchRollback method of DataSourceManager

-
public BranchStatus branchRollback(BranchType branchType, String xid, long branchId, String resourceId, String applicationData) throws TransactionException {
-    //look up the corresponding data source by resourceId
-    DataSourceProxy dataSourceProxy = get(resourceId);
-    if (dataSourceProxy == null) {
-        throw new ShouldNeverHappenException();
-    }
-    try {
-        UndoLogManager.undo(dataSourceProxy, xid, branchId);
-    } catch (TransactionException te) {
-        if (te.getCode() == TransactionExceptionCode.BranchRollbackFailed_Unretriable) {
-            return BranchStatus.PhaseTwo_RollbackFailed_Unretryable;
-        } else {
-            return BranchStatus.PhaseTwo_RollbackFailed_Retryable;
-        }
-    }
-    return BranchStatus.PhaseTwo_Rollbacked;
-}
-
-

Finally the undo method of UndoLogManager is executed. Since it is plain JDBC code and rather long, it is not pasted here; you can read the source on GitHub. The concrete undo flow is as follows (a condensed sketch follows the list)

-
  1. look up the undo_log written in phase one by xid and branchId
  2. if it is found, generate the compensating SQL from the data recorded in the undo_log and execute it, restoring the data modified in phase one
  3. after step 2, delete that undo_log row
  4. if step 1 finds no matching undo_log, insert an undo_log row with status GlobalFinished. The likely reason for not finding one is that the phase-one local transaction failed and never wrote it. Because xid and branchId form a unique index, the insert in step 4 prevents a recovered phase one from writing its row later, so phase one will then fail, the business data will not be committed, and the net effect is that the data ends up rolled back
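The four steps above can be condensed into the following control-flow sketch. It is not the actual UndoLogManager.undo implementation; UndoRecord and the helper methods are hypothetical placeholders standing in for the real JDBC logic.

import java.sql.Connection;
import java.sql.SQLException;

// Condensed sketch of the undo flow described above; all helpers are placeholders.
class UndoFlowSketch {

    void undo(Connection conn, String xid, long branchId) throws SQLException {
        UndoRecord record = findUndoLog(conn, xid, branchId);      // step 1
        if (record != null) {
            replayBeforeImage(conn, record);                       // step 2: restore the phase-one changes
            deleteUndoLog(conn, xid, branchId);                    // step 3
        } else {
            insertGlobalFinishedLog(conn, xid, branchId);          // step 4: the unique (xid, branch_id) index blocks a late phase-one write
        }
    }

    // --- hypothetical placeholders, present only so the control flow reads clearly ---
    static class UndoRecord { }
    private UndoRecord findUndoLog(Connection c, String xid, long branchId) throws SQLException { return null; }
    private void replayBeforeImage(Connection c, UndoRecord r) throws SQLException { }
    private void deleteUndoLog(Connection c, String xid, long branchId) throws SQLException { }
    private void insertGlobalFinishedLog(Connection c, String xid, long branchId) throws SQLException { }
}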

Summary

-

Working through a distributed business scenario, this article analysed the main processing flow on the fescar client side and walked through the key source code of the TM and RM roles, hopefully helping you understand how fescar works.

-

With fescar's rapid iteration and the roadmap planned for later releases, it should in time become a benchmark open-source solution for distributed transactions.

-
- - - - - - - diff --git a/zh-cn/blog/seata-analysis-java-client.json b/zh-cn/blog/seata-analysis-java-client.json deleted file mode 100644 index 11ec439b..00000000 --- a/zh-cn/blog/seata-analysis-java-client.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "seata-analysis-java-client.md", - "__html": "

前言

\n

在分布式系统中,分布式事务是一个必须要解决的问题,目前使用较多的是最终一致性方案。自年初阿里开源了Fescar(四月初更名为Seata)后,该项目受到了极大的关注度,目前已接近8000Star。Seata以高性能和零侵入的方式为目标解决微服务领域的分布式事务难题,目前正处于快速迭代中,近期小目标是生产可用的Mysql版本。关于Seata的总体介绍,可以查看官方WIKI获得更多更全面的内容介绍。

\n

本文主要基于spring cloud+spring jpa+spring cloud alibaba fescar+mysql+seata的结构,搭建一个分布式系统的demo,通过seata的debug日志和源代码,从client端(RM、TM)的角度分析说明其工作流程及原理。

\n

文中代码基于fescar-0.4.1,由于项目刚更名为seata不久,例如一些包名、类名、jar包名称还都是fescar的命名,故下文中仍使用fescar进行表述。

\n

示例项目:https://github.com/fescar-group/fescar-samples/tree/master/springcloud-jpa-seata

\n

相关概念

\n
    \n
  • XID:全局事务的唯一标识,由ip:port:sequence组成
  • \n
  • Transaction Coordinator (TC):事务协调器,维护全局事务的运行状态,负责协调并驱动全局事务的提交或回滚
  • \n
  • Transaction Manager (TM ):控制全局事务的边界,负责开启一个全局事务,并最终发起全局提交或全局回滚的决议
  • \n
  • Resource Manager (RM):控制分支事务,负责分支注册、状态汇报,并接收事务协调器的指令,驱动分支(本地)事务的提交和回滚
  • \n
\n

分布式框架支持

\n

Fescar使用XID表示一个分布式事务,XID需要在一次分布式事务请求所涉的系统中进行传递,从而向fescar-server发送分支事务的处理情况,以及接收fescar-server的commit、rollback指令。\nFescar官方已支持全版本的dubbo协议,而对于spring cloud(spring-boot)的分布式项目社区也提供了相应的实现

\n
<dependency>\n    <groupId>org.springframework.cloud</groupId>\n    <artifactId>spring-cloud-alibaba-fescar</artifactId>\n    <version>2.1.0.BUILD-SNAPSHOT</version>\n</dependency>\n
\n

该组件实现了基于RestTemplate、Feign通信时的XID传递功能。

\n

业务逻辑

\n

业务逻辑是经典的下订单、扣余额、减库存流程。\n根据模块划分为三个独立的服务,且分别连接对应的数据库

\n
    \n
  • 订单:order-server
  • \n
  • 账户:account-server
  • \n
  • 库存:storage-server
  • \n
\n

另外还有发起分布式事务的业务系统

\n
    \n
  • 业务:business-server
  • \n
\n

项目结构如下图\n\"在这里插入图片描述\"

\n

正常业务

\n
    \n
  1. business发起购买请求
  2. \n
  3. storage扣减库存
  4. \n
  5. order创建订单
  6. \n
  7. account扣减余额
  8. \n
\n

异常业务

\n
    \n
  1. business发起购买请求
  2. \n
  3. storage扣减库存
  4. \n
  5. order创建订单
  6. \n
  7. account扣减余额异常
  8. \n
\n

正常流程下2、3、4步的数据正常更新全局commit,异常流程下的数据则由于第4步的异常报错全局回滚。

\n

配置文件

\n

fescar的配置入口文件是registry.conf,查看代码ConfigurationFactory得知目前还不能指定该配置文件,所以配置文件名称只能为registry.conf

\n
private static final String REGISTRY_CONF = \"registry.conf\";\npublic static final Configuration FILE_INSTANCE = new FileConfiguration(REGISTRY_CONF);\n
\n

registry中可以指定具体配置的形式,默认使用file类型,在file.conf中有3部分配置内容

\n
    \n
  1. transport\ntransport部分的配置对应NettyServerConfig类,用于定义Netty相关的参数,TM、RM与fescar-server之间使用Netty进行通信
  2. \n
  3. service
  4. \n
\n
\t service {\n\t  #vgroup->rgroup\n\t  vgroup_mapping.my_test_tx_group = \"default\"\n\t  #配置Client连接TC的地址\n\t  default.grouplist = \"127.0.0.1:8091\"\n\t  #degrade current not support\n\t  enableDegrade = false\n\t  #disable\n\t  是否启用seata的分布式事务\n\t  disableGlobalTransaction = false\n\t}\n
\n
    \n
  1. client
  2. \n
\n
\tclient {\n\t  #RM接收TC的commit通知后缓冲上限\n\t  async.commit.buffer.limit = 10000\n\t  lock {\n\t    retry.internal = 10\n\t    retry.times = 30\n\t  }\n\t}\n
\n

数据源Proxy

\n

除了前面的配置文件,fescar在AT模式下稍微有点代码量的地方就是对数据源的代理指定,且目前只能基于DruidDataSource的代理。\n注:在最新发布的0.4.2版本中已支持任意数据源类型

\n
@Bean\n@ConfigurationProperties(prefix = \"spring.datasource\")\npublic DruidDataSource druidDataSource() {\n    DruidDataSource druidDataSource = new DruidDataSource();\n    return druidDataSource;\n}\n\n@Primary\n@Bean(\"dataSource\")\npublic DataSourceProxy dataSource(DruidDataSource druidDataSource) {\n    return new DataSourceProxy(druidDataSource);\n}\n
\n

使用DataSourceProxy的目的是为了引入ConnectionProxy,fescar无侵入的一方面就体现在ConnectionProxy的实现上,即分支事务加入全局事务的切入点是在本地事务的commit阶段,这样设计可以保证业务数据与undo_log是在一个本地事务中。

\n

undo_log是需要在业务库上创建的一个表,fescar依赖该表记录每笔分支事务的状态及二阶段rollback的回放数据。不用担心该表的数据量过大形成单点问题,在全局事务commit的场景下事务对应的undo_log会异步删除。

\n
CREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n

启动Server

\n

前往https://github.com/seata/seata/releases 下载与Client版本对应的fescar-server,避免由于版本的不同导致的协议不一致问题\n进入解压之后的 bin 目录,执行

\n
./fescar-server.sh 8091 ../data\n
\n

启动成功输出

\n
2019-04-09 20:27:24.637 INFO [main]c.a.fescar.core.rpc.netty.AbstractRpcRemotingServer.start:152 -Server started ... \n
\n

启动Client

\n

fescar的加载入口类位于GlobalTransactionAutoConfiguration,对基于spring boot的项目能够自动加载,当然也可以通过其他方式实例化GlobalTransactionScanner

\n
@Configuration\n@EnableConfigurationProperties({FescarProperties.class})\npublic class GlobalTransactionAutoConfiguration {\n    private final ApplicationContext applicationContext;\n    private final FescarProperties fescarProperties;\n\n    public GlobalTransactionAutoConfiguration(ApplicationContext applicationContext, FescarProperties fescarProperties) {\n        this.applicationContext = applicationContext;\n        this.fescarProperties = fescarProperties;\n    }\n\n\t/**\n\t* 示例化GlobalTransactionScanner\n\t* scanner为client初始化的发起类\n\t*/\n    @Bean\n    public GlobalTransactionScanner globalTransactionScanner() {\n        String applicationName = this.applicationContext.getEnvironment().getProperty(\"spring.application.name\");\n        String txServiceGroup = this.fescarProperties.getTxServiceGroup();\n        if (StringUtils.isEmpty(txServiceGroup)) {\n            txServiceGroup = applicationName + \"-fescar-service-group\";\n            this.fescarProperties.setTxServiceGroup(txServiceGroup);\n        }\n\t\t\n        return new GlobalTransactionScanner(applicationName, txServiceGroup);\n    }\n}\n
\n

可以看到支持一个配置项FescarProperties,用于配置事务分组名称

\n
spring.cloud.alibaba.fescar.tx-service-group=my_test_tx_group\n
\n

如果不指定服务组,则默认使用spring.application.name+ -fescar-service-group生成名称,所以不指定spring.application.name启动会报错

\n
@ConfigurationProperties(\"spring.cloud.alibaba.fescar\")\npublic class FescarProperties {\n    private String txServiceGroup;\n\n    public FescarProperties() {\n    }\n\n    public String getTxServiceGroup() {\n        return this.txServiceGroup;\n    }\n\n    public void setTxServiceGroup(String txServiceGroup) {\n        this.txServiceGroup = txServiceGroup;\n    }\n}\n
\n

获取applicationId和txServiceGroup后,创建GlobalTransactionScanner对象,主要看类中initClient方法

\n
private void initClient() {\n    if (StringUtils.isNullOrEmpty(applicationId) || StringUtils.isNullOrEmpty(txServiceGroup)) {\n        throw new IllegalArgumentException(\n            \"applicationId: \" + applicationId + \", txServiceGroup: \" + txServiceGroup);\n    }\n    //init TM\n    TMClient.init(applicationId, txServiceGroup);\n\n    //init RM\n    RMClient.init(applicationId, txServiceGroup);\n  \n}\n
\n

方法中可以看到初始化了TMClientRMClient,对于一个服务既可以是TM角色也可以是RM角色,至于什么时候是TM或者RM则要看在一次全局事务中@GlobalTransactional注解标注在哪。\nClient创建的结果是与TC的一个Netty连接,所以在启动日志中可以看到两个Netty Channel,其中标明了transactionRole分别为TMROLERMROLE

\n
2019-04-09 13:42:57.417  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {\"address\":\"127.0.0.1:8091\",\"message\":{\"applicationId\":\"business-service\",\"byteBuffer\":{\"char\":\"\\u0000\",\"direct\":false,\"double\":0.0,\"float\":0.0,\"int\":0,\"long\":0,\"readOnly\":false,\"short\":0},\"transactionServiceGroup\":\"my_test_tx_group\",\"typeCode\":101,\"version\":\"0.4.1\"},\"transactionRole\":\"TMROLE\"}\n2019-04-09 13:42:57.505  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {\"address\":\"127.0.0.1:8091\",\"message\":{\"applicationId\":\"business-service\",\"byteBuffer\":{\"char\":\"\\u0000\",\"direct\":false,\"double\":0.0,\"float\":0.0,\"int\":0,\"long\":0,\"readOnly\":false,\"short\":0},\"transactionServiceGroup\":\"my_test_tx_group\",\"typeCode\":103,\"version\":\"0.4.1\"},\"transactionRole\":\"RMROLE\"}\n2019-04-09 13:42:57.629 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterTMRequest{applicationId='business-service', transactionServiceGroup='my_test_tx_group'}\n2019-04-09 13:42:57.629 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterRMRequest{resourceIds='null', applicationId='business-service', transactionServiceGroup='my_test_tx_group'}\n2019-04-09 13:42:57.699 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:1\n2019-04-09 13:42:57.699 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:2\n2019-04-09 13:42:57.701 DEBUG 93715 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@3b06d101 msgId:1, future :com.alibaba.fescar.core.protocol.MessageFuture@28bb1abd, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null\n2019-04-09 13:42:57.701 DEBUG 93715 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.TmRpcClient@65fc3fb7 msgId:2, future :com.alibaba.fescar.core.protocol.MessageFuture@9a1e3df, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null\n2019-04-09 13:42:57.710  INFO 93715 --- [imeoutChecker_1] c.a.fescar.core.rpc.netty.RmRpcClient    : register RM success. server version:0.4.1,channel:[id: 0xe6468995, L:/127.0.0.1:57397 - R:/127.0.0.1:8091]\n2019-04-09 13:42:57.710  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 114 ms, version:0.4.1,role:TMROLE,channel:[id: 0xd22fe0c5, L:/127.0.0.1:57398 - R:/127.0.0.1:8091]\n2019-04-09 13:42:57.711  INFO 93715 --- [imeoutChecker_1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 125 ms, version:0.4.1,role:RMROLE,channel:[id: 0xe6468995, L:/127.0.0.1:57397 - R:/127.0.0.1:8091]\n\n
\n

日志中可以看到

\n
    \n
  1. 创建Netty连接
  2. \n
  3. 发送注册请求
  4. \n
  5. 得到响应结果
  6. \n
  7. RmRpcClientTmRpcClient成功实例化
  8. \n
\n

TM处理流程

\n

在本例中,TM的角色是business-service,BusinessService的purchase方法标注了@GlobalTransactional注解

\n
@Service\npublic class BusinessService {\n\n    @Autowired\n    private StorageFeignClient storageFeignClient;\n    @Autowired\n    private OrderFeignClient orderFeignClient;\n\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount){\n        storageFeignClient.deduct(commodityCode, orderCount);\n\n        orderFeignClient.create(userId, commodityCode, orderCount);\n    }\n}\n
\n

方法调用后将会创建一个全局事务,首先关注@GlobalTransactional注解的作用,在GlobalTransactionalInterceptor中被拦截处理

\n
/**\n * AOP拦截方法调用\n */\n@Override\npublic Object invoke(final MethodInvocation methodInvocation) throws Throwable {\n    Class<?> targetClass = (methodInvocation.getThis() != null ? AopUtils.getTargetClass(methodInvocation.getThis()) : null);\n    Method specificMethod = ClassUtils.getMostSpecificMethod(methodInvocation.getMethod(), targetClass);\n    final Method method = BridgeMethodResolver.findBridgedMethod(specificMethod);\n\n\t//获取方法GlobalTransactional注解\n    final GlobalTransactional globalTransactionalAnnotation = getAnnotation(method, GlobalTransactional.class);\n    final GlobalLock globalLockAnnotation = getAnnotation(method, GlobalLock.class);\n    \n    //如果方法有GlobalTransactional注解,则拦截到相应方法处理\n    if (globalTransactionalAnnotation != null) {\n        return handleGlobalTransaction(methodInvocation, globalTransactionalAnnotation);\n    } else if (globalLockAnnotation != null) {\n        return handleGlobalLock(methodInvocation);\n    } else {\n        return methodInvocation.proceed();\n    }\n}\n
\n

handleGlobalTransaction方法中对TransactionalTemplate的execute进行了调用,从类名可以看到这是一个标准的模版方法,它定义了TM对全局事务处理的标准步骤,注释已经比较清楚了

\n
public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {\n    // 1. get or create a transaction\n    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\n\n    try {\n        // 2. begin transaction\n        try {\n            triggerBeforeBegin();\n            tx.begin(business.timeout(), business.name());\n            triggerAfterBegin();\n        } catch (TransactionException txe) {\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.BeginFailure);\n        }\n        Object rs = null;\n        try {\n            // Do Your Business\n            rs = business.execute();\n        } catch (Throwable ex) {\n            // 3. any business exception, rollback.\n            try {\n                triggerBeforeRollback();\n                tx.rollback();\n                triggerAfterRollback();\n                // 3.1 Successfully rolled back\n                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);\n            } catch (TransactionException txe) {\n                // 3.2 Failed to rollback\n                throw new TransactionalExecutor.ExecutionException(tx, txe,\n                    TransactionalExecutor.Code.RollbackFailure, ex);\n            }\n        }\n        // 4. everything is fine, commit.\n        try {\n            triggerBeforeCommit();\n            tx.commit();\n            triggerAfterCommit();\n        } catch (TransactionException txe) {\n            // 4.1 Failed to commit\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.CommitFailure);\n        }\n        return rs;\n    } finally {\n        //5. clear\n        triggerAfterCompletion();\n        cleanUp();\n    }\n}\n
\n

通过DefaultGlobalTransaction的begin方法开启全局事务

\n
public void begin(int timeout, String name) throws TransactionException {\n    if (role != GlobalTransactionRole.Launcher) {\n        check();\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"Ignore Begin(): just involved in global transaction [\" + xid + \"]\");\n        }\n        return;\n    }\n    if (xid != null) {\n        throw new IllegalStateException();\n    }\n    if (RootContext.getXID() != null) {\n        throw new IllegalStateException();\n    }\n    //具体开启事务的方法,获取TC返回的XID\n    xid = transactionManager.begin(null, null, name, timeout);\n    status = GlobalStatus.Begin;\n    RootContext.bind(xid);\n    if (LOGGER.isDebugEnabled()) {\n        LOGGER.debug(\"Begin a NEW global transaction [\" + xid + \"]\");\n    }\n}\n
\n

方法开头处if (role != GlobalTransactionRole.Launcher)对role的判断有关键的作用,表明当前是全局事务的发起者(Launcher)还是参与者(Participant)。如果在分布式事务的下游系统方法中也加上@GlobalTransactional注解,那么它的角色就是Participant,会忽略后面的begin直接return,而判断是Launcher还是Participant是根据当前上下文是否已存在XID来判断,没有XID的就是Launcher,已经存在XID的就是Participant.\n由此可见,全局事务的创建只能由Launcher执行,而一次分布式事务中也只有一个Launcher存在。

\n

DefaultTransactionManager负责TM与TC通讯,发送begin、commit、rollback指令

\n
@Override\npublic String begin(String applicationId, String transactionServiceGroup, String name, int timeout)\n    throws TransactionException {\n    GlobalBeginRequest request = new GlobalBeginRequest();\n    request.setTransactionName(name);\n    request.setTimeout(timeout);\n    GlobalBeginResponse response = (GlobalBeginResponse)syncCall(request);\n    return response.getXid();\n}\n
\n

至此拿到fescar-server返回的XID表示一个全局事务创建成功,日志中也反应了上述流程

\n
2019-04-09 13:46:57.417 DEBUG 31326 --- [nio-8084-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int)\n2019-04-09 13:46:57.417 DEBUG 31326 --- [geSend_TMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int), channel:[id: 0xa148545e, L:/127.0.0.1:56120 - R:/127.0.0.1:8091],active?true,writable?true,isopen?true\n2019-04-09 13:46:57.418 DEBUG 31326 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage timeout=60000,transactionName=purchase(java.lang.String,java.lang.String,int)\n2019-04-09 13:46:57.421 DEBUG 31326 --- [lector_TMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage com.alibaba.fescar.core.protocol.transaction.GlobalBeginResponse@2dc480dc,messageId:1196\n2019-04-09 13:46:57.421 DEBUG 31326 --- [nio-8084-exec-1] c.a.fescar.core.context.RootContext      : bind 192.168.224.93:8091:2008502699\n2019-04-09 13:46:57.421 DEBUG 31326 --- [nio-8084-exec-1] c.a.f.tm.api.DefaultGlobalTransaction    : Begin a NEW global transaction [192.168.224.93:8091:2008502699]\n
\n

全局事务创建后,就开始执行business.execute(),即业务代码storageFeignClient.deduct(commodityCode, orderCount)进入RM处理流程,此处的业务逻辑为调用storage-service的扣减库存接口。

\n

RM处理流程

\n
@GetMapping(path = \"/deduct\")\npublic Boolean deduct(String commodityCode, Integer count){\n    storageService.deduct(commodityCode,count);\n    return true;\n}\n\n@Transactional\npublic void deduct(String commodityCode, int count){\n    Storage storage = storageDAO.findByCommodityCode(commodityCode);\n    storage.setCount(storage.getCount()-count);\n\n    storageDAO.save(storage);\n}\n
\n

storage的接口和service方法并未出现fescar相关的代码和注解,体现了fescar的无侵入。那它是如何加入到这次全局事务中的呢?答案在ConnectionProxy中,这也是前面说为什么必须要使用DataSourceProxy的原因,通过DataSourceProxy才能在业务代码的本地事务提交时,fescar通过该切入点,向TC注册分支事务并发送RM的处理结果。

\n

由于业务代码本身的事务提交被ConnectionProxy代理实现,所以在提交本地事务时,实际执行的是ConnectionProxy的commit方法

\n
public void commit() throws SQLException {\n\t//如果当前是全局事务,则执行全局事务的提交\n\t//判断是不是全局事务,就是看当前上下文是否存在XID\n    if (context.inGlobalTransaction()) {\n        processGlobalTransactionCommit();\n    } else if (context.isGlobalLockRequire()) {\n        processLocalCommitWithGlobalLocks();\n    } else {\n        targetConnection.commit();\n    }\n}\n    \nprivate void processGlobalTransactionCommit() throws SQLException {\n    try {\n    \t//首先是向TC注册RM,拿到TC分配的branchId\n        register();\n    } catch (TransactionException e) {\n        recognizeLockKeyConflictException(e);\n    }\n\n    try {\n        if (context.hasUndoLog()) {\n        \t//写入undolog\n            UndoLogManager.flushUndoLogs(this);\n        }\n\n\t\t//提交本地事务,写入undo_log和业务数据在同一个本地事务中\n        targetConnection.commit();\n    } catch (Throwable ex) {\n    \t//向TC发送RM的事务处理失败的通知\n        report(false);\n        if (ex instanceof SQLException) {\n            throw new SQLException(ex);\n        }\n    }\n\t//向TC发送RM的事务处理成功的通知\n    report(true);\n    context.reset();\n}\n    \nprivate void register() throws TransactionException {\n\t//注册RM,构建request通过netty向TC发送注册指令\n    Long branchId = DefaultResourceManager.get().branchRegister(BranchType.AT, getDataSourceProxy().getResourceId(),\n            null, context.getXid(), null, context.buildLockKeys());\n    //将返回的branchId存在上下文中\n    context.setBranchId(branchId);\n}\n
\n

通过日志印证一下上面的流程

\n
2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : xid in RootContext null xid in RpcContext 192.168.0.2:8091:2008546211\n2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] c.a.fescar.core.context.RootContext      : bind 192.168.0.2:8091:2008546211\n2019-04-09 21:57:48.341 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : bind 192.168.0.2:8091:2008546211 to RootContext\n2019-04-09 21:57:48.386  INFO 38933 --- [nio-8081-exec-1] o.h.h.i.QueryTranslatorFactoryInitiator  : HHH000397: Using ASTQueryTranslatorFactory\nHibernate: select storage0_.id as id1_0_, storage0_.commodity_code as commodit2_0_, storage0_.count as count3_0_ from storage_tbl storage0_ where storage0_.commodity_code=?\nHibernate: update storage_tbl set count=? where id=?\n2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : will connect to 192.168.0.2:8091\n2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : RM will register :jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false\n2019-04-09 21:57:48.673  INFO 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.NettyPoolableFactory   : NettyPool create channel to {\"address\":\"192.168.0.2:8091\",\"message\":{\"applicationId\":\"storage-service\",\"byteBuffer\":{\"char\":\"\\u0000\",\"direct\":false,\"double\":0.0,\"float\":0.0,\"int\":0,\"long\":0,\"readOnly\":false,\"short\":0},\"resourceIds\":\"jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false\",\"transactionServiceGroup\":\"hello-service-fescar-service-group\",\"typeCode\":103,\"version\":\"0.4.0\"},\"transactionRole\":\"RMROLE\"}\n2019-04-09 21:57:48.677 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:RegisterRMRequest{resourceIds='jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false', applicationId='storage-service', transactionServiceGroup='hello-service-fescar-service-group'}\n2019-04-09 21:57:48.680 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null,messageId:9\n2019-04-09 21:57:48.680 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@7d61f5d4 msgId:9, future :com.alibaba.fescar.core.protocol.MessageFuture@186cd3e0, body:version=0.4.1,extraData=null,identified=true,resultCode=null,msg=null\n2019-04-09 21:57:48.680  INFO 38933 --- [nio-8081-exec-1] c.a.fescar.core.rpc.netty.RmRpcClient    : register RM success. 
server version:0.4.1,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]\n2019-04-09 21:57:48.680  INFO 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.NettyPoolableFactory   : register success, cost 3 ms, version:0.4.1,role:RMROLE,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]\n2019-04-09 21:57:48.680 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1\n2019-04-09 21:57:48.681 DEBUG 38933 --- [geSend_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1, channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091],active?true,writable?true,isopen?true\n2019-04-09 21:57:48.681 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage transactionId=2008546211,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,lockKey=storage_tbl:1\n2019-04-09 21:57:48.687 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage BranchRegisterResponse: transactionId=2008546211,branchId=2008546212,result code =Success,getMsg =null,messageId:11\n2019-04-09 21:57:48.702 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.rm.datasource.undo.UndoLogManager  : Flushing UNDO LOG: {\"branchId\":2008546212,\"sqlUndoLogs\":[{\"afterImage\":{\"rows\":[{\"fields\":[{\"keyType\":\"PrimaryKey\",\"name\":\"id\",\"type\":4,\"value\":1},{\"keyType\":\"NULL\",\"name\":\"count\",\"type\":4,\"value\":993}]}],\"tableName\":\"storage_tbl\"},\"beforeImage\":{\"rows\":[{\"fields\":[{\"keyType\":\"PrimaryKey\",\"name\":\"id\",\"type\":4,\"value\":1},{\"keyType\":\"NULL\",\"name\":\"count\",\"type\":4,\"value\":994}]}],\"tableName\":\"storage_tbl\"},\"sqlType\":\"UPDATE\",\"tableName\":\"storage_tbl\"}],\"xid\":\"192.168.0.2:8091:2008546211\"}\n2019-04-09 21:57:48.755 DEBUG 38933 --- [nio-8081-exec-1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : offer message: transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null\n2019-04-09 21:57:48.755 DEBUG 38933 --- [geSend_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : write message:FescarMergeMessage transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null, channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091],active?true,writable?true,isopen?true\n2019-04-09 21:57:48.756 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:FescarMergeMessage transactionId=2008546211,branchId=2008546212,resourceId=null,status=PhaseOne_Done,applicationData=null\n2019-04-09 21:57:48.758 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:MergeResultMessage com.alibaba.fescar.core.protocol.transaction.BranchReportResponse@582a08cf,messageId:13\n2019-04-09 21:57:48.799 DEBUG 38933 --- [nio-8081-exec-1] c.a.fescar.core.context.RootContext      : unbind 192.168.0.2:8091:2008546211\n2019-04-09 21:57:48.799 DEBUG 38933 --- [nio-8081-exec-1] o.s.c.a.f.web.FescarHandlerInterceptor   : unbind 192.168.0.2:8091:2008546211 from RootContext\n
\n
    \n
  1. 获取business-service传来的XID
  2. 绑定XID到当前上下文中
  3. 执行业务逻辑sql
  4. 向TC创建本次RM的Netty连接
  5. 向TC发送分支事务的相关信息
  6. 获得TC返回的branchId
  7. 记录Undo Log数据
  8. 向TC发送本次事务PhaseOne阶段的处理结果
  9. 从当前上下文中解绑XID
\n

其中第1步和第9步,是在FescarHandlerInterceptor中完成的。该类并不属于fescar,而是来自前面提到的spring-cloud-alibaba-fescar,它实现了基于feign、rest通信时将xid bind和unbind到当前请求上下文中。到这里RM完成了PhaseOne阶段的工作,接着看PhaseTwo阶段的处理逻辑。

\n

事务提交

\n

各分支事务执行完成后,TC对各RM的汇报结果进行汇总,给各RM发送commit或rollback的指令

\n
2019-04-09 21:57:49.813 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Receive:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null,messageId:1\n2019-04-09 21:57:49.813 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.AbstractRpcRemoting    : com.alibaba.fescar.core.rpc.netty.RmRpcClient@7d61f5d4 msgId:1, body:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null\n2019-04-09 21:57:49.814  INFO 38933 --- [atch_RMROLE_1_8] c.a.f.core.rpc.netty.RmMessageListener   : onMessage:xid=192.168.0.2:8091:2008546211,branchId=2008546212,branchType=AT,resourceId=jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false,applicationData=null\n2019-04-09 21:57:49.816  INFO 38933 --- [atch_RMROLE_1_8] com.alibaba.fescar.rm.AbstractRMHandler  : Branch committing: 192.168.0.2:8091:2008546211 2008546212 jdbc:mysql://127.0.0.1:3306/db_storage?useSSL=false null\n2019-04-09 21:57:49.816  INFO 38933 --- [atch_RMROLE_1_8] com.alibaba.fescar.rm.AbstractRMHandler  : Branch commit result: PhaseTwo_Committed\n2019-04-09 21:57:49.817  INFO 38933 --- [atch_RMROLE_1_8] c.a.fescar.core.rpc.netty.RmRpcClient    : RmRpcClient sendResponse branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null\n2019-04-09 21:57:49.817 DEBUG 38933 --- [atch_RMROLE_1_8] c.a.f.c.rpc.netty.AbstractRpcRemoting    : send response:branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null,channel:[id: 0xd40718e3, L:/192.168.0.2:62607 - R:/192.168.0.2:8091]\n2019-04-09 21:57:49.817 DEBUG 38933 --- [lector_RMROLE_1] c.a.f.c.rpc.netty.MessageCodecHandler    : Send:branchStatus=PhaseTwo_Committed,result code =Success,getMsg =null\n
\n

从日志中可以看到

\n
    \n
  1. RM收到XID=192.168.0.2:8091:2008546211,branchId=2008546212的commit通知
  2. 执行commit动作
  3. 将commit结果发送给TC,branchStatus为PhaseTwo_Committed
\n

具体看下二阶段commit的执行过程,在AbstractRMHandler类的doBranchCommit方法

\n
/**\n * 拿到通知的xid、branchId等关键参数\n * 然后调用RM的branchCommit\n */\nprotected void doBranchCommit(BranchCommitRequest request, BranchCommitResponse response) throws TransactionException {\n    String xid = request.getXid();\n    long branchId = request.getBranchId();\n    String resourceId = request.getResourceId();\n    String applicationData = request.getApplicationData();\n    LOGGER.info(\"Branch committing: \" + xid + \" \" + branchId + \" \" + resourceId + \" \" + applicationData);\n    BranchStatus status = getResourceManager().branchCommit(request.getBranchType(), xid, branchId, resourceId, applicationData);\n    response.setBranchStatus(status);\n    LOGGER.info(\"Branch commit result: \" + status);\n}\n
\n

最终会将branchCommit的请求调用到AsyncWorker的branchCommit方法。AsyncWorker的处理方式是fescar架构的一个关键设计:因为大部分事务都会正常提交,业务的主要工作在PhaseOne阶段就已经完成,锁可以尽快释放;PhaseTwo阶段接收到commit指令后异步处理即可,从而把PhaseTwo的时间消耗排除在一次分布式事务之外。

\n
private static final List<Phase2Context> ASYNC_COMMIT_BUFFER = Collections.synchronizedList( new ArrayList<Phase2Context>());\n        \n/**\n * 将需要提交的XID加入list\n */\n@Override\npublic BranchStatus branchCommit(BranchType branchType, String xid, long branchId, String resourceId, String applicationData) throws TransactionException {\n    if (ASYNC_COMMIT_BUFFER.size() < ASYNC_COMMIT_BUFFER_LIMIT) {\n        ASYNC_COMMIT_BUFFER.add(new Phase2Context(branchType, xid, branchId, resourceId, applicationData));\n    } else {\n        LOGGER.warn(\"Async commit buffer is FULL. Rejected branch [\" + branchId + \"/\" + xid + \"] will be handled by housekeeping later.\");\n    }\n    return BranchStatus.PhaseTwo_Committed;\n}\n\t\n/**\n * 通过定时任务消费list中的XID\n */\npublic synchronized void init() {\n    LOGGER.info(\"Async Commit Buffer Limit: \" + ASYNC_COMMIT_BUFFER_LIMIT);\n    timerExecutor = new ScheduledThreadPoolExecutor(1,\n        new NamedThreadFactory(\"AsyncWorker\", 1, true));\n    timerExecutor.scheduleAtFixedRate(new Runnable() {\n        @Override\n        public void run() {\n            try {\n                doBranchCommits();\n            } catch (Throwable e) {\n                LOGGER.info(\"Failed at async committing ... \" + e.getMessage());\n            }\n        }\n    }, 10, 1000 * 1, TimeUnit.MILLISECONDS);\n}\n\t\nprivate void doBranchCommits() {\n    if (ASYNC_COMMIT_BUFFER.size() == 0) {\n        return;\n    }\n    Map<String, List<Phase2Context>> mappedContexts = new HashMap<>();\n    Iterator<Phase2Context> iterator = ASYNC_COMMIT_BUFFER.iterator();\n    \n    //一次定时循环取出ASYNC_COMMIT_BUFFER中的所有待办数据\n    //以resourceId作为key分组待commit数据,resourceId是一个数据库的连接url\n    //在前面的日志中可以看到,目的是为了覆盖应用的多数据源创建\n    while (iterator.hasNext()) {\n        Phase2Context commitContext = iterator.next();\n        List<Phase2Context> contextsGroupedByResourceId = mappedContexts.get(commitContext.resourceId);\n        if (contextsGroupedByResourceId == null) {\n            contextsGroupedByResourceId = new ArrayList<>();\n            mappedContexts.put(commitContext.resourceId, contextsGroupedByResourceId);\n        }\n        contextsGroupedByResourceId.add(commitContext);\n\n        iterator.remove();\n\n    }\n\n    for (Map.Entry<String, List<Phase2Context>> entry : mappedContexts.entrySet()) {\n        Connection conn = null;\n        try {\n            try {\n            \t//根据resourceId获取数据源以及连接\n                DataSourceProxy dataSourceProxy = DataSourceManager.get().get(entry.getKey());\n                conn = dataSourceProxy.getPlainConnection();\n            } catch (SQLException sqle) {\n                LOGGER.warn(\"Failed to get connection for async committing on \" + entry.getKey(), sqle);\n                continue;\n            }\n            List<Phase2Context> contextsGroupedByResourceId = entry.getValue();\n            for (Phase2Context commitContext : contextsGroupedByResourceId) {\n                try {\n                \t//执行undolog的处理,即删除xid、branchId对应的记录\n                    UndoLogManager.deleteUndoLog(commitContext.xid, commitContext.branchId, conn);\n                } catch (Exception ex) {\n                    LOGGER.warn(\n                        \"Failed to delete undo log [\" + commitContext.branchId + \"/\" + commitContext.xid + \"]\", ex);\n                }\n            }\n\n        } finally {\n            if (conn != null) {\n                try {\n                    conn.close();\n                } catch (SQLException closeEx) {\n                    
LOGGER.warn(\"Failed to close JDBC resource while deleting undo_log \", closeEx);\n                }\n            }\n        }\n    }\n}\n
\n

所以对于commit动作的处理,RM只需删除xid、branchId对应的undo_log即可。
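对应地,这一步的清理动作大致相当于下面这段示意代码(仅为帮助理解,并非源码原文;表名、字段名按上文undo_log日志的结构假设,conn、xid、branchId沿用上文Phase2Context中的语义):

// 示意:二阶段提交时删除对应的undo_log(伪实现)
String deleteSql = "DELETE FROM undo_log WHERE branch_id = ? AND xid = ?";
try (PreparedStatement ps = conn.prepareStatement(deleteSql)) {
    ps.setLong(1, branchId);
    ps.setString(2, xid);
    ps.executeUpdate();
}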

\n

事务回滚

\n

对于rollback场景的触发有两种情况

\n
    \n
  1. 分支事务处理异常,即 ConnectionProxy 执行 report(false) 上报一阶段失败的情况
  2. TM捕获到下游系统上抛的异常,即发起全局事务、标有@GlobalTransactional注解的方法捕获到的异常。在前面TransactionalTemplate类的execute模版方法中,对business.execute()的调用进行了catch,catch后会调用rollback,由TM通知TC对应XID需要回滚事务
\n
 public void rollback() throws TransactionException {\n    //只有Launcher能发起这个rollback\n    if (role == GlobalTransactionRole.Participant) {\n        // Participant has no responsibility of committing\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"Ignore Rollback(): just involved in global transaction [\" + xid + \"]\");\n        }\n        return;\n    }\n    if (xid == null) {\n        throw new IllegalStateException();\n    }\n\n    status = transactionManager.rollback(xid);\n    if (RootContext.getXID() != null) {\n        if (xid.equals(RootContext.getXID())) {\n            RootContext.unbind();\n        }\n    }\n}\n
\n

TC汇总后向参与者发送rollback指令,RM在AbstractRMHandler类的doBranchRollback方法中接收这个rollback的通知

\n
protected void doBranchRollback(BranchRollbackRequest request, BranchRollbackResponse response) throws TransactionException {\n    String xid = request.getXid();\n    long branchId = request.getBranchId();\n    String resourceId = request.getResourceId();\n    String applicationData = request.getApplicationData();\n    LOGGER.info(\"Branch rolling back: \" + xid + \" \" + branchId + \" \" + resourceId);\n    BranchStatus status = getResourceManager().branchRollback(request.getBranchType(), xid, branchId, resourceId, applicationData);\n    response.setBranchStatus(status);\n    LOGGER.info(\"Branch rollback result: \" + status);\n}\n
\n

然后将rollback请求传递到DataSourceManager类的branchRollback方法

\n
public BranchStatus branchRollback(BranchType branchType, String xid, long branchId, String resourceId, String applicationData) throws TransactionException {\n    //根据resourceId获取对应的数据源\n    DataSourceProxy dataSourceProxy = get(resourceId);\n    if (dataSourceProxy == null) {\n        throw new ShouldNeverHappenException();\n    }\n    try {\n        UndoLogManager.undo(dataSourceProxy, xid, branchId);\n    } catch (TransactionException te) {\n        if (te.getCode() == TransactionExceptionCode.BranchRollbackFailed_Unretriable) {\n            return BranchStatus.PhaseTwo_RollbackFailed_Unretryable;\n        } else {\n            return BranchStatus.PhaseTwo_RollbackFailed_Retryable;\n        }\n    }\n    return BranchStatus.PhaseTwo_Rollbacked;\n}\n
\n

最终会执行UndoLogManager类的undo方法,因为是纯jdbc操作代码比较长就不贴出来了,可以通过连接到github查看源码,说一下undo的具体流程

\n
    \n
  1. 根据xid和branchId查找PhaseOne阶段提交的undo_log
  2. 如果找到了,就根据undo_log中记录的数据生成回放sql并执行,即还原PhaseOne阶段修改的数据
  3. 第2步处理完后,删除该条undo_log数据
  4. 如果第1步没有找到对应的undo_log,就插入一条状态为GlobalFinished的undo_log。没找到的原因通常是PhaseOne阶段的本地事务异常,undo_log没有正常写入。由于xid和branchId是唯一索引,这里插入的记录可以阻止PhaseOne阶段恢复后再成功写入undo_log,使PhaseOne阶段报错,业务数据也就不会提交成功,最终达到了回滚的效果(本列表之后给出一段简化的示意实现)
\n
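上述undo流程的主干可以用下面的示意代码概括(伪实现:executeCompensationSql、insertUndoLogWithGlobalFinished为假设的辅助方法,真实逻辑在UndoLogManager中,细节以源码为准):

// 示意:UndoLogManager.undo 的主干流程(伪实现,细节以源码为准)
public static void undo(DataSourceProxy dataSourceProxy, String xid, long branchId) throws SQLException {
    try (Connection conn = dataSourceProxy.getPlainConnection()) {
        conn.setAutoCommit(false);
        try (PreparedStatement ps = conn.prepareStatement(
                "SELECT rollback_info FROM undo_log WHERE xid = ? AND branch_id = ? FOR UPDATE")) {
            ps.setString(1, xid);
            ps.setLong(2, branchId);
            try (ResultSet rs = ps.executeQuery()) {
                if (rs.next()) {
                    // 步骤1/2/3:找到undo_log,按before/after镜像生成回放sql并执行,然后删除该条undo_log
                    byte[] rollbackInfo = rs.getBytes("rollback_info");
                    executeCompensationSql(conn, rollbackInfo);            // 假设的辅助方法
                    deleteUndoLog(xid, branchId, conn);
                } else {
                    // 步骤4:没找到,则插入一条GlobalFinished状态的undo_log,
                    // 借助(xid, branch_id)唯一索引阻止PhaseOne恢复后再写入undo_log
                    insertUndoLogWithGlobalFinished(xid, branchId, conn);  // 假设的辅助方法
                }
            }
        }
        conn.commit();
    }
}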

总结

\n

本地结合分布式业务场景,分析了fescar client侧的主要处理流程,对TM和RM角色的主要源码进行了解析,希望能对大家理解fescar的工作原理有所帮助。

\n

随着fescar的快速迭代以及后期的Roadmap规划,假以时日相信fescar能够成为开源分布式事务的标杆解决方案。

\n", - "link": "/zh-cn/blog/seata-analysis-java-client.html", - "meta": { - "title": "分布式事务之Seata-Client原理及流程详解", - "author": "fangliangsheng", - "date": "2019/04/15", - "keywords": "fescar、seata、分布式事务" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-analysis-java-server.html b/zh-cn/blog/seata-analysis-java-server.html deleted file mode 100644 index 59b38e42..00000000 --- a/zh-cn/blog/seata-analysis-java-server.html +++ /dev/null @@ -1,282 +0,0 @@ - - - - - - - - - - 深度剖析一站式分布式事务方案Seata-Server - - - - -

1.关于Seata

-

在前不久,我写了一篇关于分布式事务中间件Fescar的解析,没过几天Fescar团队对其进行了品牌升级,取名为Seata(Simple Extensible Autonomous Transaction Architecture),而以前的Fescar的英文全称为Fast & EaSy Commit And Rollback。可以看见Fescar从名字上来看更加局限于Commit和Rollback,而新的品牌名字Seata旨在打造一套一站式分布式事务解决方案。更换名字之后,我对其未来的发展更有信心。

-

这里先大概回忆一下Seata的整个过程模型:

-

-
    -
  • TM:事务的发起者。用来告诉TC,全局事务的开始,提交,回滚。
  • -
  • RM:具体的事务资源,每一个RM都会作为一个分支事务注册在TC。
  • -
  • TC:事务的协调者。也可以看做是Fescar-server,用于接收我们的事务的注册,提交和回滚。
  • -
-

在之前的文章中对整个角色有个大体的介绍,在这篇文章中我将重点介绍其中的核心角色TC,也就是事务协调器。

-

2.Transaction Coordinator

-

为什么之前一直强调 TC 是核心呢?因为TC这个角色就好像上帝一样,管控着芸芸众生的RM和TM。TC一旦不好使,RM和TM只要出现一点小问题,整体必定会乱得一塌糊涂。所以要想了解Seata,就必须要了解它的TC。

-

那么一个优秀的事务协调者应该具备哪些能力呢?我觉得应该有以下几个:

-
    -
  • 正确的协调:能正确的协调RM和TM接下来应该做什么,做错了应该怎么办,做对了应该怎么办。
  • -
  • 高可用: 事务协调器在分布式事务中很重要,如果不能保证高可用,那么它也没有存在的必要了。
  • -
  • 高性能:事务协调器的性能一定要高,如果事务协调器性能有瓶颈那么它所管理的RM和TM那么会经常遇到超时,从而引起回滚频繁。
  • -
  • 高扩展性:这个特点是属于代码层面的,如果是一个优秀的框架,那么需要给使用方很多自定义扩展,比如服务注册/发现,读取配置等等。
  • -
-

下面我也将逐步阐述Seata是如何做到上面四点。

-

2.1 Seata-Server的设计

-

-

Seata-Server整体的模块图如上所示:

-
    -
  • Coordinator Core: 在最下面的模块是事务协调器核心代码,主要用来处理事务协调的逻辑,如是否commit,rollback等协调活动。
  • -
  • Store:存储模块,用来将我们的数据持久化,防止重启或者宕机数据丢失。
  • -
  • Discovery: 服务注册/发现模块,用于将Server地址暴露给我们Client。
  • -
  • Config: 用来存储和查找我们服务端的配置。
  • -
  • Lock: 锁模块,用于给Seata提供全局锁的功能。
  • -
  • RPC:用于和其它端通信。
  • -
  • HA-Cluster:高可用集群,目前还没开源,为Seata提供可靠的高可用服务,预计将会在0.6版本开源。
  • -
-

2.2 Discovery

-

首先来讲讲比较基础的Discovery模块,又称服务注册/发现模块。我们将Seata-Server启动之后,需要将自己的地址暴露给其它使用者,这就需要这个模块帮忙。

-

-

这个模块有个核心接口RegistryService,如上图所示:

-
    -
  • register:服务端使用,进行服务注册。
  • -
  • unregister:服务端使用,一般在JVM关闭钩子,ShutdownHook中调用。
  • -
  • subscribe:客户端使用,注册监听事件,用来监听地址的变化。
  • -
  • unsubscribe:客户端使用,取消注册监听事件。
  • -
  • lookup:客户端使用,根据key查找服务地址列表。
  • -
  • close:都可以使用,用于关闭Registry资源。
  • -
-

如果需要添加自己定义的服务注册/发现,实现这个接口即可。截至目前,在社区的不断开发推动下,已经有五种服务注册/发现实现,分别是redis、zk、nacos、eureka 和 consul。下面简单介绍下Nacos的实现:
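在看Nacos的实现之前,先按上面的方法列表写出这个接口的大致形态(示意,泛型参数与异常声明以实际源码为准):

import java.net.InetSocketAddress;
import java.util.List;

// 示意:RegistryService 接口的大致形态(以实际源码为准)
public interface RegistryService<T> {
    void register(InetSocketAddress address) throws Exception;       // 服务端注册自身地址
    void unregister(InetSocketAddress address) throws Exception;     // 一般在JVM的ShutdownHook中调用
    void subscribe(String cluster, T listener) throws Exception;     // 客户端监听地址变化
    void unsubscribe(String cluster, T listener) throws Exception;   // 取消监听
    List<InetSocketAddress> lookup(String key) throws Exception;     // 根据key查找服务地址列表
    void close() throws Exception;                                   // 关闭Registry资源
}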

-

2.2.1 register接口:

-

-

step1:校验地址是否合法

-

step2:获取Nacos的 Naming 实例,然后将地址注册到服务名为 serverAddr(固定服务名) 的对应集群分组(registry.conf 文件配置)上面。

-

unregister接口类似,这里不做详解。

-

2.2.2 lookup接口:

-

-

step1:获取当前clusterName名字。

-

step2:判断当前集群名对应的服务是否已经订阅过了,如果是直接从map中取订阅返回的数据。

-

step3:如果没有订阅先主动查询一次服务实例列表,然后添加订阅并将订阅返回的数据存放到map中,之后直接从map获取最新数据。

-

2.2.3 subscribe接口

-

-

这个接口比较简单,具体分两步:

-

step1:将要订阅的 cluster -> listener 存放到map中,此处nacos客户端未提供本机已订阅列表的查询,所以需要自己维护。

-

step2:使用Nacos api 订阅。

-

2.3 Config

-

配置模块也是一个比较基础、比较简单的模块。我们需要配置一些常用的参数,比如:Netty的select线程数量、worker线程数量、session允许的最大数量等等,当然这些参数在Seata中都有自己的默认设置。

-

同样的在Seata中也提供了一个接口Configuration,用来自定义我们需要的获取配置的地方:

-

-
    -
  • getInt/Long/Boolean/getConfig():通过dataId来获取对应的值,读取不到配置、异常或超时将返回参数中的默认值。
  • -
  • putConfig:用于添加配置。
  • -
  • removeConfig:删除一个配置。
  • -
  • add/remove/get ConfigListener:添加/删除/获取 配置监听器,一般用来监听配置的变更。
  • -
-

目前为止有四种方式获取Config:File(文件获取)、Nacos、Apollo 和 ZK(不推荐)。在Seata中首先需要配置registry.conf来指定config.type。conf的实现比较简单,这里就不深入分析。
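结合上文列出的方法,Configuration 接口的大致形态如下(示意:监听器类型名ConfigChangeListener以及各方法的具体签名,以所用版本的源码为准):

import java.util.List;

// 示意:Configuration 接口的大致形态(方法签名以实际版本为准)
public interface Configuration {
    int getInt(String dataId, int defaultValue);            // 读取不到配置、异常或超时时返回默认值
    long getLong(String dataId, long defaultValue);
    boolean getBoolean(String dataId, boolean defaultValue);
    String getConfig(String dataId, String defaultValue);
    boolean putConfig(String dataId, String content);       // 添加配置
    boolean removeConfig(String dataId);                    // 删除配置
    void addConfigListener(String dataId, ConfigChangeListener listener);     // 监听配置变更
    void removeConfigListener(String dataId, ConfigChangeListener listener);
    List<ConfigChangeListener> getConfigListeners(String dataId);
}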

-

2.4 Store

-

存储层的实现对于Seata是否高性能、是否可靠非常关键。如果存储层没有实现好,一旦发生宕机,TC中正在进行分布式事务处理的数据就会丢失,既然使用了分布式事务,那肯定不能容忍丢失。如果存储层实现好了但性能有很大问题,RM可能会频繁回滚,完全无法应对高并发的场景。

-

在Seata中默认提供了文件方式的存储。下面我们把存储的数据统称为Session:TM创建的全局事务数据叫GlobalSession,RM创建的分支事务数据叫BranchSession,一个GlobalSession可以拥有多个BranchSession。我们的目的就是要把这些Session存储下来。

-

在FileTransactionStoreManager#writeSession代码中:

-

-

上面的代码主要分为下面几步:

-
    -
  • step1:生成一个TransactionWriteFuture。
  • -
  • step2:将这个futureRequest丢进一个LinkedBlockingQueue中。为什么需要将所有数据都丢进队列中呢?这里其实也可以用锁来实现,在另外一个阿里开源的RocketMQ中,使用的就是锁。不论是队列还是锁,它们的目的都是保证单线程写,这又是为什么呢?有人会解释说,需要保证顺序写,这样速度才快,这个理解是错误的:FileChannel的写方法本身是线程安全的,已经能保证顺序写了。保证单线程写其实是为了让文件写满切换、记录写数据位置等逻辑都在单线程中执行,这些逻辑当然也可以主动加锁去做,但为了实现简单方便,直接在整个写逻辑上排队处理是最合适的。
  • -
  • step3:调用future.get,等待该条数据的写逻辑完成通知(本列表之后给出这一排队写模式的简化示意)。
  • -
-
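上面step1~step3描述的"排队写 + Future等待"模式,大致可以写成下面的示意代码(伪实现:TransactionWriteFuture按上文理解为一个可阻塞等待写结果的Future,其构造参数、get的返回值与超时时间均为假设;SessionStorable指上文所说的Session数据):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// 示意:writeSession 生产者侧的排队写模式(伪实现,细节以FileTransactionStoreManager源码为准)
private final BlockingQueue<TransactionWriteFuture> writeQueue = new LinkedBlockingQueue<>();

public boolean writeSession(SessionStorable session) throws Exception {
    TransactionWriteFuture future = new TransactionWriteFuture(session);  // step1:生成写请求
    if (!writeQueue.offer(future)) {                                       // step2:丢进队列,由单线程消费
        return false;
    }
    return future.get(5, TimeUnit.SECONDS);                                // step3:阻塞等待写逻辑完成的通知
}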

我们将数据提交到队列之后,我们接下来需要对其进行消费,代码如下:

-

-

这里将一个WriteDataFileRunnable()提交进我们的线程池,这个Runnable的run()方法如下:

-

-

分为下面几步:

-

step1: 判断是否停止,如果stopping为true则返回null。

-

step2:从我们的队列中获取数据。

-

step3:判断future是否已经超时了,如果超时,则设置结果为false,此时生产者的get()方法会解除阻塞。

-

step4:将我们的数据写进文件,此时数据还在pageCache层并没有刷新到磁盘;如果写成功,再根据条件判断是否进行刷盘操作。

-

step5:当写入数量到达一定的时候,或者写入时间到达一定的时候,需要将我们当前的文件保存为历史文件,删除以前的历史文件,然后创建新的文件。这一步是为了防止我们文件无限增长,大量无效数据浪费磁盘资源。

-

在我们的writeDataFile中有如下代码:

-

-

step1:首先获取我们的ByteBuffer,如果超出最大循环BufferSize就直接创建一个新的,否则就使用我们缓存的Buffer。这一步可以很大的减少GC。

-

step2:然后将数据添加进入ByteBuffer。

-

step3:最后将ByteBuffer写入我们的fileChannel,这里会重试三次。此时的数据还在pageCache层,落盘受两方面影响:一方面OS有自己的刷新策略,但业务程序不能控制;另一方面,为了防止宕机等事件造成大量数据丢失,需要业务自己控制flush。下面是flush的代码:

-

-

这里flush的条件是写入达到一定数量,或者距离上次写入超过一定时间。这样也会有个小问题:如果停电,pageCache中可能还有数据没有被刷盘,会导致少量的数据丢失。目前还不支持同步模式,也就是每条数据都做刷盘操作,那样可以保证每条消息都落盘,但性能也会受到极大影响,当然后续会不断演进支持。
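按条数或按时间间隔触发刷盘的判断,大致如下面的示意代码(writesSinceLastFlush、lastFlushTime与两个阈值常量均为假设的字段,仅用于说明条件判断):

import java.io.IOException;
import java.nio.channels.FileChannel;

// 示意:满足"写入条数达到阈值或距离上次刷盘超过一定时间"时执行flush(常量与字段为假设)
private static final int MAX_WRITES_BEFORE_FLUSH = 100;
private static final long MAX_FLUSH_INTERVAL_MS = 1000L;
private int writesSinceLastFlush = 0;
private long lastFlushTime = System.currentTimeMillis();

private void flushIfNecessary(FileChannel channel) throws IOException {
    long now = System.currentTimeMillis();
    if (writesSinceLastFlush >= MAX_WRITES_BEFORE_FLUSH || now - lastFlushTime >= MAX_FLUSH_INTERVAL_MS) {
        channel.force(false);   // 将pageCache中的数据刷到磁盘
        writesSinceLastFlush = 0;
        lastFlushTime = now;
    }
}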

-

我们的store核心流程主要是上面几个方法,当然还有一些比如,session重建等,这些比较简单,读者可以自行阅读。

-

2.5 Lock

-

大家知道数据库的隔离级别主要是通过锁来实现的,同样的,分布式事务框架Seata要实现隔离级别也需要通过锁。一般数据库的隔离级别一共有四种:读未提交、读已提交、可重复读、串行化。Seata默认能保证的隔离级别是读未提交,但提供了达到读已提交隔离的手段。

-

Lock模块也就是Seata实现隔离级别的核心模块。在Lock模块中提供了一个接口用于管理我们的锁: -

-

其中有三个方法:

-
    -
  • acquireLock:用于对我们的BranchSession加锁,这里虽然是传的分支事务Session,实际上是对分支事务的资源加锁,成功返回true。
  • -
  • isLockable:根据事务ID,资源Id,锁住的Key来查询是否已经加锁。
  • -
  • cleanAllLocks:清除所有的锁。 -对于锁我们可以在本地实现,也可以通过redis或者mysql来帮助我们实现。官方默认提供了本地全局锁的实现: -
  • -
-

在本地锁的实现中有两个常量需要关注:

-
    -
  • BUCKET_PER_TABLE:用来定义每个table有多少个bucket,目的是为了后续对同一个表加锁的时候减少竞争。
  • -
  • LOCK_MAP:这个map从定义上来看非常复杂,里里外外套了很多层Map,这里用个表格具体说明一下:
  • -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
层数 | key | value
1 - LOCK_MAP | resourceId(jdbcUrl) | dbLockMap
2 - dbLockMap | tableName(表名) | tableLockMap
3 - tableLockMap | PK.hashcode%Bucket(主键值的hashcode%bucket) | bucketLockMap
4 - bucketLockMap | PK | transactionId
-

可以看见实际上的加锁是在bucketLockMap这个map中完成的,具体的加锁方法比较简单,这里不作详细阐述:主要是逐层找到bucketLockMap,然后将当前transactionId塞进去;如果这个主键当前已经有transactionId,则比较是否是自己,如果不是则加锁失败。
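结合上表,acquireLock 的核心判断可以用下面的示意代码表达(伪实现:嵌套Map的层级对应上表,BUCKET_PER_TABLE 的具体取值为假设):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// 示意:本地全局锁的获取(伪实现,按 resourceId -> tableName -> bucket -> PK 四层定位)
private static final int BUCKET_PER_TABLE = 128;    // 假设的bucket数量
private static final ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer, ConcurrentMap<String, Long>>>>
        LOCK_MAP = new ConcurrentHashMap<>();

public boolean acquireLock(String resourceId, String tableName, String pk, long transactionId) {
    ConcurrentMap<String, Long> bucketLockMap = LOCK_MAP
            .computeIfAbsent(resourceId, k -> new ConcurrentHashMap<>())
            .computeIfAbsent(tableName, k -> new ConcurrentHashMap<>())
            .computeIfAbsent(Math.abs(pk.hashCode()) % BUCKET_PER_TABLE, k -> new ConcurrentHashMap<>());
    Long owner = bucketLockMap.putIfAbsent(pk, transactionId);
    return owner == null || owner == transactionId;   // 主键未被锁定,或已被当前事务持有,视为加锁成功
}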

-

2.6 RPC

-

保证Seata高性能的关键之一也是使用了Netty作为RPC框架,采用默认配置的线程模型如下图所示:

-

-

如果采用默认的基本配置,那么会有一个Acceptor线程用于处理客户端的连接,以及cpu*2数量的NIO-Thread。在这些线程中不会做太重的业务,只会做一些速度比较快的事情,比如编解码、心跳事件和TM注册;比较费时间的业务操作则交给业务线程池,默认情况下业务线程池配置为最小线程100,最大500。

-

Seata 目前允许配置的传输层配置如图所示,用户可根据需要进行Netty传输层面的调优,配置通过配置中心配置,首次加载时生效。

- -这里需要提一下的是Seata的心跳机制,这里是使用Netty的IdleStateHandler完成的,如下: -

-

在Server端对于写没有设置最大空闲时间,对于读设置了最大空闲时间,默认为15s(客户端默认写空闲为5s,发送ping消息),如果超过15s则会将连接断开,关闭资源。
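对应到Netty上,这个读空闲检测大致是这样配置和处理的(示意代码:超时数值取自上文描述,ch 假设为 ChannelInitializer 中拿到的 SocketChannel,处理逻辑对应下文的step1/step2):

import java.util.concurrent.TimeUnit;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.handler.timeout.IdleStateHandler;

// 示意:服务端pipeline中只检测读空闲(15s),读空闲超时则关闭连接
ch.pipeline()
  .addLast(new IdleStateHandler(15, 0, 0, TimeUnit.SECONDS))
  .addLast(new ChannelInboundHandlerAdapter() {
      @Override
      public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
          if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == IdleState.READER_IDLE) {
              ctx.close();   // 读空闲事件:断开连接、释放资源
          } else {
              super.userEventTriggered(ctx, evt);
          }
      }
  });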

-

-

step1:判断是否是读空闲的检测事件。

-

step2:如果是则断开连接,关闭资源。
-另外Seata 做了内存池、客户端做了批量小包合并发送、Netty连接池(减少连接创建时的服务不可用时间)等功能,以下为批量小包合并功能。
-
-客户端的消息发送并不是立即真正发送,而是通过 AbstractRpcRemoting#sendAsyncRequest 包装成 RpcMessage 存储至 basket 中,并唤醒合并发送线程。合并发送线程以 while(true) 的形式最长等待1ms,将 basket 中的消息取出包装成 merge 消息进行真正发送;此时若 channel 出现异常,则会通过 fail-fast 快速失败返回结果。merge消息发送前会在 map 中标识,收到结果后批量确认(AbstractRpcRemotingClient#channelRead),并通过 dispatch 分发至 messageListener 和 handler 去处理。同时,timerExecutor 定时对已发送消息进行超时检测,若超时则置为失败。具体消息协议设计将会在后续的文章中给出,敬请关注。
-Seata 的 Netty Client 由 TMClient 和 RMClient 组成,按事务角色功能区分,二者都继承 AbstractRpcRemotingClient;AbstractRpcRemotingClient 实现了 RemotingService(服务启停)、RegisterMsgListener(netty 连接池连接创建回调)和 ClientMessageSender(消息发送),并继承了 AbstractRpcRemoting(Client和Server 顶层消息发送和处理的模板)。
-RMClient类关系图如下图所示: - -TMClient 和 RMClient 又会根据自身的 poolConfig 配置与 NettyPoolableFactory implements KeyedPoolableObjectFactory<NettyPoolKey, Channel> 进行 channel 连接的交互,channel 连接池根据角色 key+ip 作为连接池的 key 来定位各个连接池 -,连接池对 channel 进行统一的管理。TMClient 和 RMClient 在发送过程中对于每个 ip 只会使用一个长连接,但连接不可用时,会从连接池中快速取出已经创建好并可用的连接,减少服务的不可用时间。

-

2.7 HA-Cluster

-

目前官方没有公布HA-Cluster,但是通过一些其它中间件和官方的一些透露,可以将HA-Cluster用如下方式设计: -

-

具体的流程如下:

-

step1:客户端发布信息的时候根据transactionId保证同一个transaction落在同一个master上,通过多个Master水平扩展,提供并发处理性能。
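按这个猜测,路由大致就是对transactionId做一次取模(示意代码:masterCount与masterAddresses均为假设的变量,仅对应上文的猜测性设计,并非现有实现):

// 示意:按transactionId取模路由到固定master(仅为上文猜测的一个表达)
int masterIndex = (int) (transactionId % masterCount);
String targetMaster = masterAddresses.get(masterIndex);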

-

step2:在server端中一个master有多个slave,master中的数据近实时同步到slave上,保证当master宕机的时候,还能有其它slave顶上来可以用。

-

当然上述一切都是猜测,具体的设计实现还得等0.5版本之后。目前有一个Go版本的Seata-Server也捐赠给了Seata(还在流程中),其通过raft实现副本一致性,其它细节不是太清楚。

-

2.8 Metrics

-

这个模块也是一个没有具体公布实现的模块,当然有可能会提供插件口,让其它第三方metric接入进来,最近Apache SkyWalking 正在和Seata小组商讨如何接入进来。

-

3.Coordinator Core

-

上面我们讲了很多Server基础模块,想必大家对Seata的实现已经有个大概,接下来我会讲解事务协调器具体逻辑是如何实现的,让大家更加了解Seata的实现内幕。

-

3.1 启动流程

-

启动方法在Server类有个main方法,定义了我们启动流程:

-

-

step1:创建一个RpcServer,在这里面包含了我们网络相关的操作,用Netty实现了服务端。

-

step2:解析端口号、本地文件地址(用于Server宕机后恢复未处理完成的事务)、IP(可选,本机只能获取内网ip,在跨网络时需要一个对外的vip注册服务)。

-

step3:初始化SessionHolder,其中最重要的就是从dataDir这个文件夹中恢复我们的数据,重建我们的Session。

-

step4:创建一个CoorDinator,这个也是我们事务协调器的逻辑核心代码,然后将其初始化,其内部初始化的逻辑会创建四个定时任务:

-
    -
  • retryRollbacking:重试rollback定时任务,用于将那些失败的rollback进行重试的,每隔5ms执行一次。
  • -
  • retryCommitting:重试commit定时任务,用于将那些失败的commit进行重试的,每隔5ms执行一次。
  • -
  • asyncCommitting:异步commit定时任务,用于执行异步的commit,每隔10ms一次。
  • -
  • timeoutCheck:超时定时任务检测,用于检测超时的任务,然后执行超时的逻辑,每隔2ms执行一次。
  • -
-

step5:初始化UUIDGenerator,这是我们生成各种ID(transactionId,branchId)的基本类。

-

step6:将本地IP和监听端口设置到XID中,初始化rpcServer等待客户端的连接。

-

启动流程比较简单,下面我会介绍分布式事务框架中的常见的一些业务逻辑Seata是如何处理的。

-

3.2 Begin-开启全局事务

-

一次分布式事务的起始点一定是开启全局事务,首先我们看看全局事务Seata是如何实现的:

-

-

step1:根据应用ID、事务分组、名字、超时时间创建一个GlobalSession,前面也提到过它和BranchSession分别是什么。

-

step2:对其添加一个RootSessionManager用于监听一些事件,这里要说一下目前在Seata里面有四种类型的Listener(这里要说明的是所有的sessionManager都实现了SessionLifecycleListener):

-
    -
  • ROOT_SESSION_MANAGER:最全,最大的,拥有所有的Session。
  • -
  • ASYNC_COMMITTING_SESSION_MANAGER:用于管理需要做异步commit的Session。
  • -
  • RETRY_COMMITTING_SESSION_MANAGER:用于管理重试commit的Session。
  • -
  • RETRY_ROLLBACKING_SESSION_MANAGER:用于管理重试回滚的Session。 -由于这里是开启事务,其它SessionManager不需要关注,我们只添加RootSessionManager即可。
  • -
-

step3:开启Globalsession

-

-

这一步会把状态变为Begin,记录开始时间,并且调用RootSessionManager的onBegin监听方法,将Session保存到map并写入到我们的文件。

-

step4:最后返回XID,这个XID是由 ip+port+transactionId 组成的,非常重要,当TM申请到之后需要将这个ID传到RM中,RM通过XID来决定到底应该访问哪一台Server。
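也就是说,XID可以简单理解为下面这样一个字符串(示意:serverIp、serverPort、transactionId为假设的变量名,格式与前文日志中的XID一致):

// 示意:XID = ip:port:transactionId,例如日志中的 192.168.0.2:8091:2008546211
String xid = serverIp + ":" + serverPort + ":" + transactionId;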

-

3.3 BranchRegister-分支事务注册

-

当我们全局事务在TM开启之后,我们RM的分支事务也需要注册到我们的全局事务之上,这里看看是如何处理的:

-

-

step1:通过transactionId获取并校验全局事务是否是开启状态。

-

step2:创建一个新的分支事务,也就是我们的BranchSession。

-

step3:对分支事务进行加全局锁,这里的逻辑就是使用的我们锁模块的逻辑。

-

step4:添加branchSession,主要是将其添加到globalSession对象中,并写入到我们的文件中。

-

step5:返回branchId,这个ID也很重要,我们后续需要用它来回滚我们的事务,或者对我们分支事务状态更新。

-

分支事务注册之后,还需要汇报分支事务的本地事务的执行到底是成功还是失败,在Server目前只是简单的做一下保存记录,汇报的目的是,就算这个分支事务失败,如果TM还是执意要提交全局事务(catch 异常不抛出),那么再遍历提交分支事务的时候,这个失败的分支事务就不需要提交(用户选择性跳过)。

-

3.4 GlobalCommit - 全局提交

-

当我们分支事务执行完成之后,就轮到我们的TM-事务管理器来决定是提交还是回滚,如果是提交,那么就会走到下面的逻辑:

-

-

step1:首先找到我们的globalSession。如果它为null证明已经被commit过了,那么直接幂等操作,返回成功。

-

step2:关闭我们的GlobalSession,防止再有新的branch进来(应对跨服务调用超时回滚、provider仍在继续执行的情况)。

-

step3:如果status等于Begin,那么就证明还没有提交过,将其状态改为Committing,也就是正在提交。

-

step4:判断是否是可以异步提交,目前只有AT模式可以异步提交,二阶段全局提交时只是删除undolog并无严格顺序,此处使用定时任务,客户端收到后批量合并删除。

-

step5:如果是异步提交,直接将其放进ASYNC_COMMITTING_SESSION_MANAGER,让后台线程异步去做step6;如果是同步的,那么直接执行step6。

-

step6:遍历我们的BranchSession进行提交。如果某个分支事务失败,根据不同的条件判断是否进行重试:异步提交时,这个branchSession不成功可以继续执行下一个,因为其本身都在manager中,只要没有成功就不会被删除,会一直重试;如果是同步提交,则会放进重试队列进行定时重试,并卡住按照顺序提交。

-

3.5 GlobalRollback - 全局回滚

-

如果我们的TM决定全局回滚,那么会走到下面的逻辑:

-

-

这个逻辑和提交流程基本一致,可以看作是它的反向,这里就不展开讲了。

-

4.总结

-

最后再总结一下开头提出的事务协调器的4个关键点,看看Seata到底是怎么解决的:

-
    -
  • 正确的协调:通过后台定时任务各种正确的重试,并且未来会推出监控平台有可能可以手动回滚。
  • -
  • 高可用: 通过HA-Cluster保证高可用。
  • -
  • 高性能:文件顺序写,RPC通过netty实现,Seata未来可以水平扩展,提高处理性能。
  • -
  • 高扩展性:提供给用户可以自由实现的地方,比如配置,服务发现和注册,全局锁等等。
  • -
-

最后希望大家能从这篇文章能了解Seata-Server的核心设计原理,当然你也可以想象如果你自己去实现一个分布式事务的Server应该怎样去设计?

-

Seata GitHub地址:https://github.com/seata/seata

-

本文作者:

-

李钊,GitHub ID @CoffeeLatte007,公众号「咖啡拿铁」作者,Seata社区 Committer,猿辅导Java工程师,曾就职于美团。对分布式中间件,分布式系统有浓厚的兴趣。
-季敏(清铭),GitHub ID @slievrly,Seata 开源项目负责人,阿里巴巴中间件 TXC/GTS 核心研发成员,长期从事于分布式中间件核心研发工作,在分布式事务领域有着较丰富的技术积累。

-
- - - - - - - diff --git a/zh-cn/blog/seata-analysis-java-server.json b/zh-cn/blog/seata-analysis-java-server.json deleted file mode 100644 index 4aca814b..00000000 --- a/zh-cn/blog/seata-analysis-java-server.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "seata-analysis-java-server.md", - "__html": "

1.关于Seata

\n

在前不久,我写了一篇关于分布式事务中间件Fescar的解析,没过几天Fescar团队对其进行了品牌升级,取名为Seata(Simple Extensible Autonomous Transaction Architecture),而以前的Fescar的英文全称为Fast &amp; EaSy Commit And Rollback。可以看见Fescar从名字上来看更加局限于Commit和Rollback,而新的品牌名字Seata旨在打造一套一站式分布式事务解决方案。更换名字之后,我对其未来的发展更有信心。

\n

这里先大概回忆一下Seata的整个过程模型:

\n

\"\"

\n
    \n
  • TM:事务的发起者。用来告诉TC,全局事务的开始,提交,回滚。
  • \n
  • RM:具体的事务资源,每一个RM都会作为一个分支事务注册在TC。
  • \n
  • TC:事务的协调者。也可以看做是Fescar-server,用于接收我们的事务的注册,提交和回滚。
  • \n
\n

在之前的文章中对整个角色有个大体的介绍,在这篇文章中我将重点介绍其中的核心角色TC,也就是事务协调器。

\n

2.Transaction Coordinator

\n

为什么之前一直强调 TC 是核心呢?因为TC这个角色就好像上帝一样,管控着芸芸众生的RM和TM。TC一旦不好使,RM和TM只要出现一点小问题,整体必定会乱得一塌糊涂。所以要想了解Seata,就必须要了解它的TC。

\n

那么一个优秀的事务协调者应该具备哪些能力呢?我觉得应该有以下几个:

\n
    \n
  • 正确的协调:能正确的协调RM和TM接下来应该做什么,做错了应该怎么办,做对了应该怎么办。
  • \n
  • 高可用: 事务协调器在分布式事务中很重要,如果不能保证高可用,那么它也没有存在的必要了。
  • \n
  • 高性能:事务协调器的性能一定要高,如果事务协调器性能有瓶颈那么它所管理的RM和TM那么会经常遇到超时,从而引起回滚频繁。
  • \n
  • 高扩展性:这个特点是属于代码层面的,如果是一个优秀的框架,那么需要给使用方很多自定义扩展,比如服务注册/发现,读取配置等等。
  • \n
\n

下面我也将逐步阐述Seata是如何做到上面四点。

\n

2.1 Seata-Server的设计

\n

\"\"

\n

Seata-Server整体的模块图如上所示:

\n
    \n
  • Coordinator Core: 在最下面的模块是事务协调器核心代码,主要用来处理事务协调的逻辑,如是否commit,rollback等协调活动。
  • \n
  • Store:存储模块,用来将我们的数据持久化,防止重启或者宕机数据丢失。
  • \n
  • Discovery: 服务注册/发现模块,用于将Server地址暴露给我们Client。
  • \n
  • Config: 用来存储和查找我们服务端的配置。
  • \n
  • Lock: 锁模块,用于给Seata提供全局锁的功能。
  • \n
  • RPC:用于和其它端通信。
  • \n
  • HA-Cluster:高可用集群,目前还没开源,为Seata提供可靠的高可用服务,预计将会在0.6版本开源。
  • \n
\n

2.2 Discovery

\n

首先来讲讲比较基础的Discovery模块,又称服务注册/发现模块。我们将Seata-Sever启动之后,需要将自己的地址暴露给其它使用者,那么就需要我们这个模块帮忙。

\n

\"\"

\n

这个模块有个核心接口RegistryService,如上图所示:

\n
    \n
  • register:服务端使用,进行服务注册。
  • \n
  • unregister:服务端使用,一般在JVM关闭钩子,ShutdownHook中调用。
  • \n
  • subscribe:客户端使用,注册监听事件,用来监听地址的变化。
  • \n
  • unsubscribe:客户端使用,取消注册监听事件。
  • \n
  • lookup:客户端使用,根据key查找服务地址列表。
  • \n
  • close:都可以使用,用于关闭Registry资源。
  • \n
\n

如果需要添加自己定义的服务注册/发现,实现这个接口即可。截至目前,在社区的不断开发推动下,已经有五种服务注册/发现实现,分别是redis、zk、nacos、eureka 和 consul。下面简单介绍下Nacos的实现:

\n

2.2.1 register接口:

\n

\"\"

\n

step1:校验地址是否合法

\n

step2:获取Nacos的 Naming 实例,然后将地址注册到服务名为 serverAddr(固定服务名) 的对应集群分组(registry.conf 文件配置)上面。

\n

unregister接口类似,这里不做详解。

\n

2.2.2 lookup接口:

\n

\"\"

\n

step1:获取当前clusterName名字。

\n

step2:判断当前集群名对应的服务是否已经订阅过了,如果是直接从map中取订阅返回的数据。

\n

step3:如果没有订阅先主动查询一次服务实例列表,然后添加订阅并将订阅返回的数据存放到map中,之后直接从map获取最新数据。

\n

2.2.3 subscribe接口

\n

\"\"

\n

这个接口比较简单,具体分两步:

\n

step1:对将要订阅的cluster-> listener 存放到map中,此处nacos未提交单机已订阅列表,所以需要自己实现。

\n

step2:使用Nacos api 订阅。

\n

2.3 Config

\n

配置模块也是一个比较基础,比较简单的模块。我们需要配置一些常用的参数比如:Netty的select线程数量,work线程数量,session允许最大为多少等等,当然这些参数再Seata中都有自己的默认设置。

\n

同样的在Seata中也提供了一个接口Configuration,用来自定义我们需要的获取配置的地方:

\n

\"\"

\n
    \n
  • getInt/Long/Boolean/getConfig():通过dataId来获取对应的值,读取不到配置、异常或超时将返回参数中的默认值。
  • \n
  • putConfig:用于添加配置。
  • \n
  • removeConfig:删除一个配置。
  • \n
  • add/remove/get ConfigListener:添加/删除/获取 配置监听器,一般用来监听配置的变更。
  • \n
\n

目前为止有四种方式获取Config:File(文件获取)、Nacos、Apollo 和 ZK(不推荐)。在Seata中首先需要配置registry.conf,来配置config.type 。实现conf比较简单这里就不深入分析。

\n

2.4 Store

\n

存储层的实现对于Seata是否高性能,是否可靠非常关键。\n如果存储层没有实现好,那么如果发生宕机,在TC中正在进行分布式事务处理的数据将会被丢失,既然使用了分布式事务,那么其肯定不能容忍丢失。如果存储层实现好了,但是其性能有很大问题,RM可能会发生频繁回滚那么其完全无法应对高并发的场景。

\n

在Seata中默认提供了文件方式的存储,下面我们定义我们存储的数据为Session,而我们的TM创造的全局事务操作数据叫GloabSession,RM创造的分支事务操作数据叫BranchSession,一个GloabSession可以拥有多个BranchSession。我们的目的就是要将这么多Session存储下来。

\n

在FileTransactionStoreManager#writeSession代码中:

\n

\"\"

\n

上面的代码主要分为下面几步:

\n
    \n
  • step1:生成一个TransactionWriteFuture。
  • \n
  • step2:将这个futureRequest丢进一个LinkedBlockingQueue中。为什么需要将所有数据都丢进队列中呢?当然这里其实也可以用锁来实现,再另外一个阿里开源的RocketMQ中,使用的锁。不论是队列还是锁它们的目的是为了保证单线程写,这又是为什么呢?有人会解释说,需要保证顺序写,这样速度就很快,这个理解是错误的,我们的FileChannel的写方法是线程安全的,已经能保证顺序写了。保证单线程写其实是为了让我们这个写逻辑都是单线程的,因为可能有些文件写满或者记录写数据位置等等逻辑,当然这些逻辑都可以主动加锁去做,但是为了实现简单方便,直接再整个写逻辑排队处理是最为合适的。
  • \n
  • step3:调用future.get,等待我们该条数据写逻辑完成通知。
  • \n
\n

我们将数据提交到队列之后,我们接下来需要对其进行消费,代码如下:

\n

\"\"

\n

这里将一个WriteDataFileRunnable()提交进我们的线程池,这个Runnable的run()方法如下:

\n

\"\"

\n

分为下面几步:

\n

step1: 判断是否停止,如果stopping为true则返回null。

\n

step2:从我们的队列中获取数据。

\n

step3:判断future是否已经超时了,如果超时,则设置结果为false,此时我们生产者get()方法会接触阻塞。

\n

step4:将我们的数据写进文件,此时数据还在pageCahce层并没有刷新到磁盘,如果写成功然后根据条件判断是否进行刷盘操作。

\n

step5:当写入数量到达一定的时候,或者写入时间到达一定的时候,需要将我们当前的文件保存为历史文件,删除以前的历史文件,然后创建新的文件。这一步是为了防止我们文件无限增长,大量无效数据浪费磁盘资源。

\n

在我们的writeDataFile中有如下代码:

\n

\"\"

\n

step1:首先获取我们的ByteBuffer,如果超出最大循环BufferSize就直接创建一个新的,否则就使用我们缓存的Buffer。这一步可以很大的减少GC。

\n

step2:然后将数据添加进入ByteBuffer。

\n

step3:最后将ByteBuffer写入我们的fileChannel,这里会重试三次。此时的数据还在pageCache层,受两方面的影响,OS有自己的刷新策略,但是这个业务程序不能控制,为了防止宕机等事件出现造成大量数据丢失,所以就需要业务自己控制flush。下面是flush的代码:

\n

\"\"

\n

这里flush的条件写入一定数量或者写的时间超过一定时间,这样也会有个小问题如果是停电,那么pageCache中有可能还有数据并没有被刷盘,会导致少量的数据丢失。目前还不支持同步模式,也就是每条数据都需要做刷盘操作,这样可以保证每条消息都落盘,但是性能也会受到极大的影响,当然后续会不断的演进支持。

\n

我们的store核心流程主要是上面几个方法,当然还有一些比如,session重建等,这些比较简单,读者可以自行阅读。

\n

2.5 Lock

\n

大家知道数据库的隔离级别主要是通过锁来实现的,同样的,分布式事务框架Seata要实现隔离级别也需要通过锁。一般数据库的隔离级别一共有四种:读未提交、读已提交、可重复读、串行化。Seata默认能保证的隔离级别是读未提交,但提供了达到读已提交隔离的手段。

\n

Lock模块也就是Seata实现隔离级别的核心模块。在Lock模块中提供了一个接口用于管理我们的锁:\n\"\"

\n

其中有三个方法:

\n
    \n
  • acquireLock:用于对我们的BranchSession加锁,这里虽然是传的分支事务Session,实际上是对分支事务的资源加锁,成功返回true。
  • \n
  • isLockable:根据事务ID,资源Id,锁住的Key来查询是否已经加锁。
  • \n
  • cleanAllLocks:清除所有的锁。\n对于锁我们可以在本地实现,也可以通过redis或者mysql来帮助我们实现。官方默认提供了本地全局锁的实现:\n\"\"
  • \n
\n

在本地锁的实现中有两个常量需要关注:

\n
    \n
  • BUCKET_PER_TABLE:用来定义每个table有多少个bucket,目的是为了后续对同一个表加锁的时候减少竞争。
  • \n
  • LOCK_MAP:这个map从定义上来看非常复杂,里里外外套了很多层Map,这里用个表格具体说明一下:
  • \n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
层数keyvalue
1-LOCK_MAPresourceId(jdbcUrl)dbLockMap
2- dbLockMaptableName (表名)tableLockMap
3- tableLockMapPK.hashcode%Bucket (主键值的hashcode%bucket)bucketLockMap
4- bucketLockMapPKtrascationId
\n

可以看见实际上的加锁是在bucketLockMap这个map中完成的,具体的加锁方法比较简单,这里不作详细阐述:主要是逐层找到bucketLockMap,然后将当前transactionId塞进去;如果这个主键当前已经有transactionId,则比较是否是自己,如果不是则加锁失败。

\n

2.6 RPC

\n

保证Seata高性能的关键之一也是使用了Netty作为RPC框架,采用默认配置的线程模型如下图所示:

\n

\"\"

\n

如果采用默认的基本配置那么会有一个Acceptor线程用于处理客户端的链接,会有cpu*2数量的NIO-Thread,再这个线程中不会做业务太重的事情,只会做一些速度比较快的事情,比如编解码,心跳事件,和TM注册。一些比较费时间的业务操作将会交给业务线程池,默认情况下业务线程池配置为最小线程为100,最大为500。

\n

Seata 目前允许配置的传输层配置如图所示,用户可根据需要进行Netty传输层面的调优,配置通过配置中心配置,首次加载时生效。

\n \n这里需要提一下的是Seata的心跳机制,这里是使用Netty的IdleStateHandler完成的,如下:\n

\"\"

\n

在Sever端对于写没有设置最大空闲时间,对于读设置了最大空闲时间,默认为15s(客户端默认写空闲为5s,发送ping消息),如果超过15s则会将链接断开,关闭资源。

\n

\"\"

\n

step1:判断是否是读空闲的检测事件。

\n

step2:如果是则断开链接,关闭资源。
\n另外Seata 做了内存池、客户端做了批量小包合并发送、Netty连接池(减少连接创建时的服务不可用时间)等功能,以下为批量小包合并功能。
\n
\n客户端的消息发送并不是真正的消息发送通过 AbstractRpcRemoting#sendAsyncRequest 包装成 RpcMessage 存储至 basket 中并唤醒合并发送线程。合并发送线程通过 while true 的形式\n最长等待1ms对basket的消息取出包装成 merge 消息进行真正发送,此时若 channel 出现异常则会通过 fail-fast 快速失败返回结果。merge消息发送前在 map 中标识,收到结果后批量确认(AbstractRpcRemotingClient#channelRead),并通过 dispatch 分发至 messageListener 和 handler 去处理。同时,timerExecutor 定时对已发送\n消息进行超时检测,若超时置为失败。具体消息协议设计将会在后续的文章中给出,敬请关注。
\nSeata 的 Netty Client由 TMClient和RMClient 组成,根据事务角色功能区分,都继承 AbstractRpcRemotingClient,AbstractRpcRemotingClient 实现了 RemotingService(服务启停), RegisterMsgListener(netty 连接池连接创建回调)和 ClientMessageSender(消息发送)继承了 AbstractRpcRemoting( Client和Server 顶层消息发送和处理的模板)。
\nRMClient类关系图如下图所示:\n\"\"\nTMClient 和 RMClient 又会根据自身的 poolConfig 配置与 NettyPoolableFactory implements KeyedPoolableObjectFactory<NettyPoolKey, Channel> 进行 channel 连接的交互,channel 连接池根据角色 key+ip 作为连接池的 key 来定位各个连接池\n,连接池对 channel 进行统一的管理。TMClient 和 RMClient 在发送过程中对于每个 ip 只会使用一个长连接,但连接不可用时,会从连接池中快速取出已经创建好并可用的连接,减少服务的不可用时间。

\n

2.7 HA-Cluster

\n

目前官方没有公布HA-Cluster,但是通过一些其它中间件和官方的一些透露,可以将HA-Cluster用如下方式设计:\n\"\"

\n

具体的流程如下:

\n

step1:客户端发布信息的时候根据transcationId保证同一个transcation是在同一个master上,通过多个Master水平扩展,提供并发处理性能。

\n

step2:在server端中一个master有多个slave,master中的数据近实时同步到slave上,保证当master宕机的时候,还能有其它slave顶上来可以用。

\n

当然上述一切都是猜测,具体的设计实现还得等0.5版本之后。目前有一个Go版本的Seata-Server也捐赠给了Seata(还在流程中),其通过raft实现副本一致性,其它细节不是太清楚。

\n

2.8 Metrics

\n

这个模块也是一个没有具体公布实现的模块,当然有可能会提供插件口,让其它第三方metric接入进来,最近Apache SkyWalking 正在和Seata小组商讨如何接入进来。

\n

3.Coordinator Core

\n

上面我们讲了很多Server基础模块,想必大家对Seata的实现已经有个大概,接下来我会讲解事务协调器具体逻辑是如何实现的,让大家更加了解Seata的实现内幕。

\n

3.1 启动流程

\n

启动方法在Server类有个main方法,定义了我们启动流程:

\n

\"\"

\n

step1:创建一个RpcServer,再这个里面包含了我们网络的操作,用Netty实现了服务端。

\n

step2:解析端口号、本地文件地址(用户Server宕机未处理完成事务恢复)、IP(可选,本机只能获取内网ip,在跨网络时需要一个对外的vip注册服务)。

\n

step3:初始化SessionHolder,其中最重要的就是从dataDir这个文件夹中恢复我们的数据,重建我们的Session。

\n

step4:创建一个CoorDinator,这个也是我们事务协调器的逻辑核心代码,然后将其初始化,其内部初始化的逻辑会创建四个定时任务:

\n
    \n
  • retryRollbacking:重试rollback定时任务,用于将那些失败的rollback进行重试的,每隔5ms执行一次。
  • \n
  • retryCommitting:重试commit定时任务,用于将那些失败的commit进行重试的,每隔5ms执行一次。
  • \n
  • asyncCommitting:异步commit定时任务,用于执行异步的commit,每隔10ms一次。
  • \n
  • timeoutCheck:超时定时任务检测,用于检测超时的任务,然后执行超时的逻辑,每隔2ms执行一次。
  • \n
\n

step5: 初始化UUIDGenerator这个也是我们生成各种ID(transcationId,branchId)的基本类。

\n

step6:将本地IP和监听端口设置到XID中,初始化rpcServer等待客户端的连接。

\n

启动流程比较简单,下面我会介绍分布式事务框架中的常见的一些业务逻辑Seata是如何处理的。

\n

3.2 Begin-开启全局事务

\n

一次分布式事务的起始点一定是开启全局事务,首先我们看看全局事务Seata是如何实现的:

\n

\"\"

\n

step1: 根据应用ID,事务分组,名字,超时时间创建一个GloabSession,这个在前面也提到过它和branchSession分别是什么。

\n

step2:对其添加一个RootSessionManager用于监听一些事件,这里要说一下目前在Seata里面有四种类型的Listener(这里要说明的是所有的sessionManager都实现了SessionLifecycleListener):

\n
    \n
  • ROOT_SESSION_MANAGER:最全,最大的,拥有所有的Session。
  • \n
  • ASYNC_COMMITTING_SESSION_MANAGER:用于管理需要做异步commit的Session。
  • \n
  • RETRY_COMMITTING_SESSION_MANAGER:用于管理重试commit的Session。
  • \n
  • RETRY_ROLLBACKING_SESSION_MANAGER:用于管理重试回滚的Session。\n由于这里是开启事务,其它SessionManager不需要关注,我们只添加RootSessionManager即可。
  • \n
\n

step3:开启Globalsession

\n

\"\"

\n

这一步会把状态变为Begin,记录开始时间,并且调用RootSessionManager的onBegin监听方法,将Session保存到map并写入到我们的文件。

\n

step4:最后返回XID,这个XID是由 ip+port+transactionId 组成的,非常重要,当TM申请到之后需要将这个ID传到RM中,RM通过XID来决定到底应该访问哪一台Server。

\n

3.3 BranchRegister-分支事务注册

\n

当我们全局事务在TM开启之后,我们RM的分支事务也需要注册到我们的全局事务之上,这里看看是如何处理的:

\n

\"\"

\n

step1:通过transactionId获取并校验全局事务是否是开启状态。

\n

step2:创建一个新的分支事务,也就是我们的BranchSession。

\n

step3:对分支事务进行加全局锁,这里的逻辑就是使用的我们锁模块的逻辑。

\n

step4:添加branchSession,主要是将其添加到globalSession对象中,并写入到我们的文件中。

\n

step5:返回branchId,这个ID也很重要,我们后续需要用它来回滚我们的事务,或者对我们分支事务状态更新。

\n

分支事务注册之后,还需要汇报分支事务的本地事务的执行到底是成功还是失败,在Server目前只是简单的做一下保存记录,汇报的目的是,就算这个分支事务失败,如果TM还是执意要提交全局事务(catch 异常不抛出),那么再遍历提交分支事务的时候,这个失败的分支事务就不需要提交(用户选择性跳过)。

\n

3.4 GlobalCommit - 全局提交

\n

当我们分支事务执行完成之后,就轮到我们的TM-事务管理器来决定是提交还是回滚,如果是提交,那么就会走到下面的逻辑:

\n

\"\"

\n

step1:首先找到我们的globalSession。如果它为null证明已经被commit过了,那么直接幂等操作,返回成功。

\n

step2:关闭我们的GloabSession防止再次有新的branch进来(跨服务调用超时回滚,provider在继续执行)。

\n

step3:如果status等于Begin,那么就证明还没有提交过,将其状态改为Committing,也就是正在提交。

\n

step4:判断是否是可以异步提交,目前只有AT模式可以异步提交,二阶段全局提交时只是删除undolog并无严格顺序,此处使用定时任务,客户端收到后批量合并删除。

\n

step5:如果是异步提交,直接将其放进我们ASYNC_COMMITTING_SESSION_MANAGER,让其再后台线程异步去做我们的step6,如果是同步的那么直接执行我们的step6。

\n

step6:遍历我们的BranchSession进行提交,如果某个分支事务失败,根据不同的条件来判断是否进行重试,可异步执行此branchSession不成功可以继续执行下一个,因为其本身都在manager中,只要没有成功就不会被删除会一直重试,如果是同步提交的会放进重试队列进行定时重试并卡住按照顺序提交。

\n

3.5 GlobalRollback - 全局回滚

\n

如果我们的TM决定全局回滚,那么会走到下面的逻辑:

\n

\"\"

\n

这个逻辑和提交流程基本一致,可以看作是它的反向,这里就不展开讲了。

\n

4.总结

\n

最后在总结一下开始我们提出了分布式事务的关键4点,Seata到底是怎么解决的:

\n
    \n
  • 正确的协调:通过后台定时任务各种正确的重试,并且未来会推出监控平台有可能可以手动回滚。
  • \n
  • 高可用: 通过HA-Cluster保证高可用。
  • \n
  • 高性能:文件顺序写,RPC通过netty实现,Seata未来可以水平扩展,提高处理性能。
  • \n
  • 高扩展性:提供给用户可以自由实现的地方,比如配置,服务发现和注册,全局锁等等。
  • \n
\n

最后希望大家能从这篇文章能了解Seata-Server的核心设计原理,当然你也可以想象如果你自己去实现一个分布式事务的Server应该怎样去设计?

\n

Seata GitHub地址:https://github.com/seata/seata

\n

本文作者:

\n

李钊,GitHub ID @CoffeeLatte007,公众号「咖啡拿铁」作者,Seata社区 Committer,猿辅导Java工程师,曾就职于美团。对分布式中间件,分布式系统有浓厚的兴趣。
\n季敏(清铭),GitHub ID @slievrly,Seata 开源项目负责人,阿里巴巴中间件 TXC/GTS 核心研发成员,长期从事于分布式中间件核心研发工作,在分布式事务领域有着较丰富的技术积累。

\n", - "link": "/zh-cn/blog/seata-analysis-java-server.html", - "meta": { - "title": "深度剖析一站式分布式事务方案Seata-Server", - "author": "李钊,季敏", - "date": "2019/04/08", - "keywords": "fescar、seata、分布式事务" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-analysis-simple.html b/zh-cn/blog/seata-analysis-simple.html deleted file mode 100644 index 77cd4a33..00000000 --- a/zh-cn/blog/seata-analysis-simple.html +++ /dev/null @@ -1,469 +0,0 @@ - - - - - - - - - - Fescar分布式事务原理解析探秘 - - - - -

前言

-

fescar发布已有时日,分布式事务一直是业界备受关注的领域,fescar发布一个月左右便收获了近5000个star,足以说明其热度。当然,在fescar出来之前,已经有比较成熟的分布式事务解决方案开源了,比较典型的方案如 LCN 的2pc型无侵入事务,目前lcn已发展到5.0,已支持和fescar事务模型类似的TXC型事务;还有TCC型事务实现 hmily、tcc-transaction 等。在微服务架构流行的当下、阿里这种开源大户的背景下,fescar的发布无疑又掀起了研究分布式事务的热潮。fescar脱胎于阿里云商业分布式事务服务GTS,在线上环境提供这种公共服务,其模式肯定经受了非常严苛的考验。其分布式事务模型TXC又效仿传统事务模型XA方案,主要区别在于资源管理器的定位:一个在应用层,一个在数据库层。博主觉得fescar的TXC模型实现非常有研究价值,所以今天我们来好好翻一翻fescar项目的代码。本文篇幅较长,浏览并理解本文大概耗时30~60分钟左右。

-

项目地址

-

fescar:https://github.com/alibaba/fescar

-

本博文所述代码为fescar的0.1.2-SNAPSHOT版本,根据fescar后期的迭代计划,其项目结构和模块实现都可能有很大的改变,特此说明。

-

fescar的TXC模型

-

-

上图为fescar官方针对TXC模型制作的示意图。不得不说大厂的图制作的真的不错,结合示意图我们可以看到TXC实现的全貌。TXC的实现通过三个组件来完成。也就是上图的三个深黄色部分,其作用如下:

-
    -
  1. TM:全局事务管理器,在标注开启fescar分布式事务的服务端开启,并将全局事务发送到TC事务控制端管理
  2. -
  3. TC:事务控制中心,控制全局事务的提交或者回滚。这个组件需要独立部署维护,目前只支持单机版本,后续迭代计划会有集群版本
  4. -
  5. RM:资源管理器,主要负责分支事务的上报,本地事务的管理
  6. -
-

一段话简述其实现过程:服务起始方发起全局事务并注册到TC。在调用协同服务时,协同服务的分支事务会先完成阶段一的事务提交或回滚,并生成事务回滚的undo_log日志,同时将当前协同服务注册到TC并上报其事务状态,归并到同一个业务的全局事务中。此时若没有问题,继续下一个协同服务的调用;期间任何协同服务的分支事务回滚,都会通知到TC,TC再通知全局事务包含的所有已完成一阶段提交的分支事务回滚。如果所有分支事务都正常,最后回到全局事务发起方时,也会通知到TC,TC再通知全局事务包含的所有分支删除回滚日志。在这个过程中,为了解决写隔离和读隔离的问题,会涉及到TC管理的全局锁。

-

本博文的目标是深入代码细节,探究其基本思路是如何实现的。首先会从项目的结构来简述每个模块的作用,继而结合官方自带的examples实例来探究整个分布式事务的实现过程。

-

项目结构解析

-

项目拉下来,用IDE打开后的目录结构如下,下面先大致的看下每个模块的实现

-

-
    -
  • common :公共组件,提供常用辅助类,静态变量、扩展机制类加载器、以及定义全局的异常等
  • -
  • config : 配置加载解析模块,提供了配置的基础接口,目前只有文件配置实现,后续会有nacos等配置中心的实现
  • -
  • core : 核心模块主要封装了TM、RM和TC通讯用RPC相关内容
  • -
  • dubbo :dubbo模块主要适配dubbo通讯框架,使用dubbo的filter机制来传统全局事务的信息到分支
  • -
  • examples :简单的演示实例模块,等下从这个模块入手探索
  • -
  • rm-datasource :资源管理模块,比较核心的一个模块,个人认为这个模块命名为core要更合理一点。代理了JDBC的一些类,用来解析sql生成回滚日志、协调管理本地事务
  • -
  • server : TC组件所在,主要协调管理全局事务,负责全局事务的提交或者回滚,同时管理维护全局锁。
  • -
  • spring :和spring集成的模块,主要是aop逻辑,是整个分布式事务的入口,研究fescar的突破口
  • -
  • tm : 全局事务事务管理模块,管理全局事务的边界,全局事务开启回滚点都在这个模块控制
  • -
-

通过【examples】模块的实例看下效果

-

第一步、先启动TC也就是【Server】模块,main方法直接启动就好,默认服务端口8091

-

第二步、回到examples模块,将订单,业务,账户、仓库四个服务的配置文件配置好,主要是mysql数据源和zookeeper连接地址,这里要注意下,默认dubbo的zk注册中心依赖没有,启动的时候回抛找不到class的异常,需要添加如下的依赖:

-
<dependency>
-    <groupId>com.101tec</groupId>
-    <artifactId>zkclient</artifactId>
-    <version>0.10</version>
-    <exclusions>
-        <exclusion>
-            <artifactId>slf4j-log4j12</artifactId>
-            <groupId>org.slf4j</groupId>
-        </exclusion>
-    </exclusions>
-</dependency>
-
-

第三步、在BusinessServiceImpl中的模拟抛异常的地方打个断点,依次启动OrderServiceImpl、StorageServiceImpl、AccountServiceImpl、BusinessServiceImpl四个服务、等进断点后,查看数据库account_tbl表,金额已减去400元,变成了599元。然后放开断点、BusinessServiceImpl模块模拟的异常触发,全局事务回滚,account_tbl表的金额就又回滚到999元了

-

如上,我们已经体验到fescar事务的控制能力了,下面我们具体看下它是怎么控制的。

-

fescar事务过程分析

-

首先分析配置文件

-

这个是一个铁律,任何一个技术或框架要集成,配置文件肯定是一个突破口。从上面的例子我们了解到,实例模块的配置文件中配置了一个全局事务扫描器实例,如:

-
<bean class="com.alibaba.fescar.spring.annotation.GlobalTransactionScanner">
-    <constructor-arg value="dubbo-demo-app"/>
-    <constructor-arg value="my_test_tx_group"/>
-</bean>
-
-

这个实例在项目启动时会扫描所有实例,具体实现见【spring】模块。并将标注了@GlobalTransactional注解的方法织入GlobalTransactionalInterceptor的invoke方法逻辑。同时应用启动时,会初始化TM(TmRpcClient)和RM(RmRpcClient)的实例,这个时候,服务已经和TC事务控制中心勾搭上了。在往下看就涉及到TM模块的事务模板类TransactionalTemplate。

-

【TM】模块启动全局事务

-

全局事务的开启,提交、回滚都被封装在TransactionalTemplate中完成了,代码如:

-

-public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {
-    // 1. get or create a transaction
-    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-    // 2. begin transaction
-    try {
-        tx.begin(business.timeout(), business.name());
-    } catch (TransactionException txe) {
-        throw new TransactionalExecutor.ExecutionException(tx, txe,
-            TransactionalExecutor.Code.BeginFailure);
-    }
-    Object rs = null;
-    try {
-        // Do Your Business
-        rs = business.execute();
-    } catch (Throwable ex) {
-        // 3. any business exception, rollback.
-        try {
-            tx.rollback();
-            // 3.1 Successfully rolled back
-            throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);
-        } catch (TransactionException txe) {
-            // 3.2 Failed to rollback
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.RollbackFailure, ex);
-        }
-    }
-    // 4. everything is fine, commit.
-    try {
-        tx.commit();
-    } catch (TransactionException txe) {
-        // 4.1 Failed to commit
-        throw new TransactionalExecutor.ExecutionException(tx, txe,
-            TransactionalExecutor.Code.CommitFailure);
-    }
-    return rs;
-}
-
-

更详细的实现在【TM】模块中被分成了两个Class实现,如下:

-

DefaultGlobalTransaction :全局事务具体的开启,提交、回滚动作

-

DefaultTransactionManager :负责使用TmRpcClient向TC控制中心发送指令,如开启全局事务(GlobalBeginRequest)、提交(GlobalCommitRequest)、回滚(GlobalRollbackRequest)、查询状态(GlobalStatusRequest)等。

-

以上是TM模块核心内容点,TM模块完成全局事务开启后,接下来就开始看看全局事务iD,xid是如何传递、RM组件是如何介入的

-

【dubbo】全局事务xid的传递

-

首先是xid的传递。目前已经实现了dubbo框架下微服务架构中的传递,其他像spring cloud和motan等想要实现也很容易:通过一般RPC通讯框架都有的filter机制,将xid从全局事务的发起节点传递到协同服务节点,节点接收到后绑定到当前线程的上下文环境中,用于在分支事务执行sql时判断是否加入全局事务。fescar的实现见【dubbo】模块如下:

-
@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)
-public class TransactionPropagationFilter implements Filter {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);
-
-    @Override
-    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
-        String xid = RootContext.getXID();
-        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID);
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("xid in RootContext[" + xid + "] xid in RpcContext[" + rpcXid + "]");
-        }
-        boolean bind = false;
-        if (xid != null) {
-            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);
-        } else {
-            if (rpcXid != null) {
-                RootContext.bind(rpcXid);
-                bind = true;
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("bind[" + rpcXid + "] to RootContext");
-                }
-            }
-        }
-        try {
-            return invoker.invoke(invocation);
-
-        } finally {
-            if (bind) {
-                String unbindXid = RootContext.unbind();
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("unbind[" + unbindXid + "] from RootContext");
-                }
-                if (!rpcXid.equalsIgnoreCase(unbindXid)) {
-                    LOGGER.warn("xid in change during RPC from " + rpcXid + " to " + unbindXid);
-                    if (unbindXid != null) {
-                        RootContext.bind(unbindXid);
-                        LOGGER.warn("bind [" + unbindXid + "] back to RootContext");
-                    }
-                }
-            }
-        }
-    }
-}
-
-

上面代码中rpcXid不为空时,就会绑定到RootContext的ContextCore中,这里稍微深入讲下。ContextCore是一个可扩展实现的接口,目前默认的实现是ThreadLocalContextCore,基于ThreadLocal来保存维护当前的xid。这里fescar提供了可扩展的机制,实现在【common】模块中,通过一个自定义的类加载器EnhancedServiceLoader加载需要扩展的服务类,这样只需要在扩展类上加@LoadLevel注解,并通过order属性声明更高的优先级别,就可以达到扩展实现的目的。
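扩展方式大致如下(示意代码:ContextCore的方法签名、@LoadLevel的属性名以所用版本的源码为准;示例实现仅演示SPI扩展的用法,未考虑线程隔离,并非可用的ContextCore替代品):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// 示意:自定义ContextCore实现,并用@LoadLevel声明更高优先级,由EnhancedServiceLoader加载
@LoadLevel(name = "MyContextCore", order = Integer.MAX_VALUE)
public class MyContextCore implements ContextCore {

    private final ConcurrentMap<String, String> store = new ConcurrentHashMap<>();

    @Override
    public String put(String key, String value) {
        return store.put(key, value);
    }

    @Override
    public String get(String key) {
        return store.get(key);
    }

    @Override
    public String remove(String key) {
        return store.remove(key);
    }
}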

-

【RM】模块本地资源管理的介入

-

fescar针对本地事务相关的接口,通过代理机制都实现了一遍代理类,如数据源(DataSourceProxy)、ConnectionProxy、StatementProxy等。这个在配置文件中也可以看出来,也就是说,我们要使用fescar分布式事务,一定要配置fescar提供的代理数据源。如:

-

-

配置好代理数据源后,从DataSourceProxy出发,本地针对数据库的所有操作过程我们就可以随意控制了。从上面xid传递,已经知道了xid被保存在RootContext中了,那么请看下面的代码,就非常清楚了:

-

首先看StatementProxy的一段代码

-

-

在看ExecuteTemplate中的代码

-

-

和【TM】模块中的事务管理模板类TransactionalTemplate类似,这里非常关键的代理逻辑也被封装在了ExecuteTemplate模板类中。因为重写了Statement、有了StatementProxy实现,在执行原JDBC的executeUpdate方法时,会调用到ExecuteTemplate的execute逻辑。在sql真正执行前,会判断RootContext当前上下文中是否包含xid,也就是判断当前是否处于全局分布式事务中。如果不是,就直接使用本地事务;如果是,RM就会增加一些分布式事务相关的逻辑。这里根据sql的不同类型,fescar封装了五个不同的执行器来处理,分别是UpdateExecutor、DeleteExecutor、InsertExecutor、SelectForUpdateExecutor、PlainExecutor,结构如下图:

-

-

PlainExecutor:

-

原生的JDBC接口实现,未做任何处理,提供给全局事务中的普通的select查询使用

-

UpdateExecutor、DeleteExecutor、InsertExecutor:

-

三个DML增删改执行器实现,主要在sql执行的前后对sql语句进行了解析,实现了如下两个抽象接口方法:

-
protected abstract TableRecords beforeImage() throws SQLException;
-
-protected abstract TableRecords afterImage(TableRecords beforeImage) throws SQLException;
-
-

在这个过程中通过解析sql生成了提供回滚操作的undo_log日志,日志目前是保存在msyql中的,和业务sql操作共用同一个事务。表的结构如下:

-

-

rollback_info保存的undo_log详细信息,是longblob类型的,结构如下:

-
{
-    "branchId":3958194,
-    "sqlUndoLogs":[
-        {
-            "afterImage":{
-                "rows":[
-                    {
-                        "fields":[
-                            {
-                                "keyType":"PrimaryKey",
-                                "name":"ID",
-                                "type":4,
-                                "value":10
-                            },
-                            {
-                                "keyType":"NULL",
-                                "name":"COUNT",
-                                "type":4,
-                                "value":98
-                            }
-                        ]
-                    }
-                ],
-                "tableName":"storage_tbl"
-            },
-            "beforeImage":{
-                "rows":[
-                    {
-                        "fields":[
-                            {
-                                "keyType":"PrimaryKey",
-                                "name":"ID",
-                                "type":4,
-                                "value":10
-                            },
-                            {
-                                "keyType":"NULL",
-                                "name":"COUNT",
-                                "type":4,
-                                "value":100
-                            }
-                        ]
-                    }
-                ],
-                "tableName":"storage_tbl"
-            },
-            "sqlType":"UPDATE",
-            "tableName":"storage_tbl"
-        }
-    ],
-    "xid":"192.168.7.77:8091:3958193"
-}
-
-
-
-

这里贴的是一个update操作,undo_log记录得非常详细:通过全局事务xid关联branchId,记录数据操作的表名、操作字段名,以及sql执行前后的记录值。如这条记录,表名=storage_tbl,sql执行前ID=10,count=100,sql执行后id=10,count=98。如果整个全局事务失败,需要回滚的时候就可以生成:

-
update storage_tbl set count = 100 where id = 10;
-
-

这样的回滚sql语句执行了。

-

SelectForUpdateExecutor:

-

fescar的AT模式在本地事务之上默认支持读未提交的隔离级别,但是通过SelectForUpdateExecutor执行器,可以支持读已提交的隔离级别。代码如:

-
@Override
-public Object doExecute(Object... args) throws Throwable {
-    SQLSelectRecognizer recognizer = (SQLSelectRecognizer) sqlRecognizer;
-
-    Connection conn = statementProxy.getConnection();
-    ResultSet rs = null;
-    Savepoint sp = null;
-    LockRetryController lockRetryController = new LockRetryController();
-    boolean originalAutoCommit = conn.getAutoCommit();
-
-    StringBuffer selectSQLAppender = new StringBuffer("SELECT ");
-    selectSQLAppender.append(getTableMeta().getPkName());
-    selectSQLAppender.append(" FROM " + getTableMeta().getTableName());
-    String whereCondition = null;
-    ArrayList<Object> paramAppender = new ArrayList<>();
-    if (statementProxy instanceof ParametersHolder) {
-        whereCondition = recognizer.getWhereCondition((ParametersHolder) statementProxy, paramAppender);
-    } else {
-        whereCondition = recognizer.getWhereCondition();
-    }
-    if (!StringUtils.isEmpty(whereCondition)) {
-        selectSQLAppender.append(" WHERE " + whereCondition);
-    }
-    selectSQLAppender.append(" FOR UPDATE");
-    String selectPKSQL = selectSQLAppender.toString();
-
-    try {
-        if (originalAutoCommit) {
-            conn.setAutoCommit(false);
-        }
-        sp = conn.setSavepoint();
-        rs = statementCallback.execute(statementProxy.getTargetStatement(), args);
-
-        while (true) {
-            // Try to get global lock of those rows selected
-            Statement stPK = null;
-            PreparedStatement pstPK = null;
-            ResultSet rsPK = null;
-            try {
-                if (paramAppender.isEmpty()) {
-                    stPK = statementProxy.getConnection().createStatement();
-                    rsPK = stPK.executeQuery(selectPKSQL);
-                } else {
-                    pstPK = statementProxy.getConnection().prepareStatement(selectPKSQL);
-                    for (int i = 0; i < paramAppender.size(); i++) {
-                        pstPK.setObject(i + 1, paramAppender.get(i));
-                    }
-                    rsPK = pstPK.executeQuery();
-                }
-
-                TableRecords selectPKRows = TableRecords.buildRecords(getTableMeta(), rsPK);
-                statementProxy.getConnectionProxy().checkLock(selectPKRows);
-                break;
-
-            } catch (LockConflictException lce) {
-                conn.rollback(sp);
-                lockRetryController.sleep(lce);
-
-            } finally {
-                if (rsPK != null) {
-                    rsPK.close();
-                }
-                if (stPK != null) {
-                    stPK.close();
-                }
-                if (pstPK != null) {
-                    pstPK.close();
-                }
-            }
-        }
-
-    } finally {
-        if (sp != null) {
-            conn.releaseSavepoint(sp);
-        }
-        if (originalAutoCommit) {
-            conn.setAutoCommit(true);
-        }
-    }
-    return rs;
-}
-
-

关键代码见:

-
TableRecords selectPKRows = TableRecords.buildRecords(getTableMeta(), rsPK);
-statementProxy.getConnectionProxy().checkLock(selectPKRows);
-
-

通过selectPKRows表操作记录拿到lockKeys,然后到TC控制器端查询是否被全局锁定了,如果被锁定了,就重新尝试,直到锁释放返回查询结果。

-

分支事务的注册和上报

-

在本地事务提交前,fescar会注册和上报分支事务相关的信息,见ConnectionProxy类的commit部分代码:

-
@Override
-public void commit() throws SQLException {
-    if (context.inGlobalTransaction()) {
-        try {
-            register();
-        } catch (TransactionException e) {
-            recognizeLockKeyConflictException(e);
-        }
-
-        try {
-            if (context.hasUndoLog()) { 
-                UndoLogManager.flushUndoLogs(this);
-            }
-            targetConnection.commit();
-        } catch (Throwable ex) {
-            report(false);
-            if (ex instanceof SQLException) {
-                throw (SQLException) ex;
-            } else {
-                throw new SQLException(ex);
-            }
-        }
-        report(true);
-        context.reset();
-       
-    } else {
-        targetConnection.commit();
-    }
-}
-
-

从这段代码我们可以看到,首先判断是否处于全局事务中:如果不是,就直接提交;如果是,就先向TC控制器注册分支事务(为了写隔离,在TC端会涉及到全局锁的获取),然后保存用于回滚操作的undo_log日志,继而真正提交本地事务,最后向TC控制器上报事务状态。此时,阶段一的本地事务就完成了。

-

【server】模块协调全局

-

关于server模块,我们可以聚焦在DefaultCoordinator这个类,这个是AbstractTCInboundHandler控制处理器默认实现。主要实现了全局事务开启,提交,回滚,状态查询,分支事务注册,上报,锁检查等接口,如:

-

-

回到一开始的TransactionlTemplate,如果整个分布式事务失败需要回滚了,首先是TM向TC发起回滚的指令,然后TC接收到后,解析请求后会被路由到默认控制器类的doGlobalRollback方法内,最终在TC控制器端执行的代码如下:

-
@Override
-public void doGlobalRollback(GlobalSession globalSession, boolean retrying) throws TransactionException {
-    for (BranchSession branchSession : globalSession.getReverseSortedBranches()) {
-        BranchStatus currentBranchStatus = branchSession.getStatus();
-        if (currentBranchStatus == BranchStatus.PhaseOne_Failed) {
-            continue;
-        }
-        try {
-            BranchStatus branchStatus = resourceManagerInbound.branchRollback(XID.generateXID(branchSession.getTransactionId()), branchSession.getBranchId(),
-                    branchSession.getResourceId(), branchSession.getApplicationData());
-
-            switch (branchStatus) {
-                case PhaseTwo_Rollbacked:
-                    globalSession.removeBranch(branchSession);
-                    LOGGER.error("Successfully rolled back branch " + branchSession);
-                    continue;
-                case PhaseTwo_RollbackFailed_Unretryable:
-                    GlobalStatus currentStatus = globalSession.getStatus();
-                    if (currentStatus.name().startsWith("Timeout")) {
-                        globalSession.changeStatus(GlobalStatus.TimeoutRollbackFailed);
-                    } else {
-                        globalSession.changeStatus(GlobalStatus.RollbackFailed);
-                    }
-                    globalSession.end();
-                    LOGGER.error("Failed to rollback global[" + globalSession.getTransactionId() + "] since branch[" + branchSession.getBranchId() + "] rollback failed");
-                    return;
-                default:
-                    LOGGER.info("Failed to rollback branch " + branchSession);
-                    if (!retrying) {
-                        queueToRetryRollback(globalSession);
-                    }
-                    return;
-
-            }
-        } catch (Exception ex) {
-            LOGGER.info("Exception rollbacking branch " + branchSession, ex);
-            if (!retrying) {
-                queueToRetryRollback(globalSession);
-                if (ex instanceof TransactionException) {
-                    throw (TransactionException) ex;
-                } else {
-                    throw new TransactionException(ex);
-                }
-            }
-
-        }
-
-    }
-    GlobalStatus currentStatus = globalSession.getStatus();
-    if (currentStatus.name().startsWith("Timeout")) {
-        globalSession.changeStatus(GlobalStatus.TimeoutRollbacked);
-    } else {
-        globalSession.changeStatus(GlobalStatus.Rollbacked);
-    }
-    globalSession.end();
-}
-
-

如上代码可以看到,回滚时从全局事务会话中迭代每个分支事务,然后通知每个分支事务回滚。分支服务接收到请求后,首先会被路由到RMHandlerAT中的doBranchRollback方法,继而调用了RM中的branchRollback方法,代码如下:

-
@Override
-public BranchStatus branchRollback(String xid, long branchId, String resourceId, String applicationData) throws TransactionException {
-    DataSourceProxy dataSourceProxy = get(resourceId);
-    if (dataSourceProxy == null) {
-        throw new ShouldNeverHappenException();
-    }
-    try {
-        UndoLogManager.undo(dataSourceProxy, xid, branchId);
-    } catch (TransactionException te) {
-        if (te.getCode() == TransactionExceptionCode.BranchRollbackFailed_Unretriable) {
-            return BranchStatus.PhaseTwo_RollbackFailed_Unretryable;
-        } else {
-            return BranchStatus.PhaseTwo_RollbackFailed_Retryable;
-        }
-    }
-    return BranchStatus.PhaseTwo_Rollbacked;
-}
-
-

RM分支事务端最后执行的是UndoLogManager的undo方法,通过xid和branchid从数据库查询出回滚日志,完成数据回滚操作,整个过程都是同步完成的。如果全局事务是成功的,TC也会有类似的上述协调过程,只不过是异步的将本次全局事务相关的undo_log清除了而已。至此,就完成了2阶段的提交或回滚,也就完成了完整的全局事务事务的控制。

-

结语

-

如果你看到这里,那么非常感谢你在繁忙工作之余耐心花时间来学习。同时,我相信花的时间没有白费,完整浏览并理解之后,对fescar实现的大致流程估计已经了解十之八九了。本文从构思立题到完成大概耗时1人天左右,博主在这个过程中,对fescar的实现也有了更加深入的了解。由于篇幅原因,并没有面面俱到地对每个实现细节去深究,如sql是如何解析的等,更多的是对fescar的TXC模型实现过程中的关键点做了详细阐述。本文已校对,但由于个人知识水平及精力有限,文中难免出现错误或理解不当的地方,欢迎指正。

-

作者简介:

-

陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构&运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。

-
- - - - - - - diff --git a/zh-cn/blog/seata-analysis-simple.json b/zh-cn/blog/seata-analysis-simple.json deleted file mode 100644 index d5f3d394..00000000 --- a/zh-cn/blog/seata-analysis-simple.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "seata-analysis-simple.md", - "__html": "

前言

\n

fescar发布已有时日,分布式事务一直是业界备受关注的领域,fescar发布一个月左右便受到了近5000个star足以说明其热度。当然,在fescar出来之前,\n已经有比较成熟的分布式事务的解决方案开源了,比较典型的方案如 LCN 的2pc型无侵入事务,\n目前lcn已发展到5.0,已支持和fescar事务模型类似的TCX型事务。还有如TCC型事务实现 hmily tcc-transaction 等。\n在微服务架构流行的当下、阿里这种开源大户背景下,fescar的发布无疑又掀起了研究分布式事务的热潮。fescar脱胎于阿里云商业分布式事务服务GTS,在线上环境提供这种公共服务其模式肯定经受了非常严苛的考验。其分布式事务模型TXC又仿于传统事务模型XA方案,主要区别在于资源管理器的定位一个在应用层一个在数据库层。博主觉得fescar的txc模型实现非常有研究的价值,所以今天我们来好好翻一翻fescar项目的代码。本文篇幅较长,浏览并理解本文大概耗时30~60分钟左右。

\n

项目地址

\n

fescar:https://github.com/alibaba/fescar

\n

本博文所述代码为fescar的0.1.2-SNAPSHOT版本,根据fescar后期的迭代计划,其项目结构和模块实现都可能有很大的改变,特此说明。

\n

fescar的TXC模型

\n

\"\"

\n

上图为fescar官方针对TXC模型制作的示意图。不得不说大厂的图制作的真的不错,结合示意图我们可以看到TXC实现的全貌。TXC的实现通过三个组件来完成。也就是上图的三个深黄色部分,其作用如下:

\n
    \n
  1. TM:全局事务管理器,在标注开启fescar分布式事务的服务端开启,并将全局事务发送到TC事务控制端管理
  2. \n
  3. TC:事务控制中心,控制全局事务的提交或者回滚。这个组件需要独立部署维护,目前只支持单机版本,后续迭代计划会有集群版本
  4. \n
  5. RM:资源管理器,主要负责分支事务的上报,本地事务的管理
  6. \n
\n

一段话简述其实现过程:服务起始方发起全局事务并注册到TC。在调用协同服务时,协同服务的事务分支事务会先完成阶段一的事务提交或回滚,并生成事务回滚的undo_log日志,同时注册当前协同服务到TC并上报其事务状态,归并到同一个业务的全局事务中。此时若没有问题继续下一个协同服务的调用,期间任何协同服务的分支事务回滚,都会通知到TC,TC在通知全局事务包含的所有已完成一阶段提交的分支事务回滚。如果所有分支事务都正常,最后回到全局事务发起方时,也会通知到TC,TC在通知全局事务包含的所有分支删除回滚日志。在这个过程中为了解决写隔离和度隔离的问题会涉及到TC管理的全局锁。

\n

本博文的目标是深入代码细节,探究其基本思路是如何实现的。首先会从项目的结构来简述每个模块的作用,继而结合官方自带的examples实例来探究整个分布式事务的实现过程。

\n

项目结构解析

\n

项目拉下来,用IDE打开后的目录结构如下,下面先大致的看下每个模块的实现

\n

\"\"

\n
    \n
  • common :公共组件,提供常用辅助类,静态变量、扩展机制类加载器、以及定义全局的异常等
  • \n
  • config : 配置加载解析模块,提供了配置的基础接口,目前只有文件配置实现,后续会有nacos等配置中心的实现
  • \n
  • core : 核心模块主要封装了TM、RM和TC通讯用RPC相关内容
  • \n
  • dubbo :dubbo模块主要适配dubbo通讯框架,使用dubbo的filter机制来传统全局事务的信息到分支
  • \n
  • examples :简单的演示实例模块,等下从这个模块入手探索
  • \n
  • rm-datasource :资源管理模块,比较核心的一个模块,个人认为这个模块命名为core要更合理一点。代理了JDBC的一些类,用来解析sql生成回滚日志、协调管理本地事务
  • \n
  • server : TC组件所在,主要协调管理全局事务,负责全局事务的提交或者回滚,同时管理维护全局锁。
  • \n
  • spring :和spring集成的模块,主要是aop逻辑,是整个分布式事务的入口,研究fescar的突破口
  • \n
  • tm : 全局事务事务管理模块,管理全局事务的边界,全局事务开启回滚点都在这个模块控制
  • \n
\n

通过【examples】模块的实例看下效果

\n

第一步、先启动TC也就是【Server】模块,main方法直接启动就好,默认服务端口8091

\n

第二步、回到examples模块,将订单,业务,账户、仓库四个服务的配置文件配置好,主要是mysql数据源和zookeeper连接地址,这里要注意下,默认dubbo的zk注册中心依赖没有,启动的时候回抛找不到class的异常,需要添加如下的依赖:

\n
<dependency>\n    <groupId>com.101tec</groupId>\n    <artifactId>zkclient</artifactId>\n    <version>0.10</version>\n    <exclusions>\n        <exclusion>\n            <artifactId>slf4j-log4j12</artifactId>\n            <groupId>org.slf4j</groupId>\n        </exclusion>\n    </exclusions>\n</dependency>\n
\n

第三步、在BusinessServiceImpl中的模拟抛异常的地方打个断点,依次启动OrderServiceImpl、StorageServiceImpl、AccountServiceImpl、BusinessServiceImpl四个服务、等进断点后,查看数据库account_tbl表,金额已减去400元,变成了599元。然后放开断点、BusinessServiceImpl模块模拟的异常触发,全局事务回滚,account_tbl表的金额就又回滚到999元了

\n

如上,我们已经体验到fescar事务的控制能力了,下面我们具体看下它是怎么控制的。

\n

fescar事务过程分析

\n

首先分析配置文件

\n

这个是一个铁律,任何一个技术或框架要集成,配置文件肯定是一个突破口。从上面的例子我们了解到,实例模块的配置文件中配置了一个全局事务扫描器实例,如:

\n
<bean class=\"com.alibaba.fescar.spring.annotation.GlobalTransactionScanner\">\n    <constructor-arg value=\"dubbo-demo-app\"/>\n    <constructor-arg value=\"my\\_test\\_tx_group\"/>\n</bean>\n
\n

这个实例在项目启动时会扫描所有bean实例,具体实现见【spring】模块,并将标注了@GlobalTransactional注解的方法织入GlobalTransactionalInterceptor的invoke方法逻辑。同时应用启动时,会初始化TM(TmRpcClient)和RM(RmRpcClient)的实例,这个时候,服务已经和TC事务控制中心勾搭上了。再往下看就涉及到TM模块的事务模板类TransactionalTemplate。
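下面给出一个加了 @GlobalTransactional 注解的业务入口方法的最小示意(类名、方法名和协同服务调用均为假设,注解所在包名与可用属性以实际版本为准),以便直观看到拦截器织入的位置:

```java
import com.alibaba.fescar.spring.annotation.GlobalTransactional;

// 示意代码:标注了 @GlobalTransactional 的方法会被 GlobalTransactionalInterceptor 拦截,
// 从而在方法执行前后完成全局事务的开启、提交或回滚
public class BusinessServiceDemo {

    @GlobalTransactional
    public void purchase(String userId, String commodityCode, int orderCount) {
        // 这里对订单、库存等协同服务的远程调用,会经由 RPC 框架的 filter 把 xid 传递出去
        // storageService.deduct(commodityCode, orderCount);
        // orderService.create(userId, commodityCode, orderCount);
    }
}
```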

\n

【TM】模块启动全局事务

\n

全局事务的开启,提交、回滚都被封装在TransactionalTemplate中完成了,代码如:

\n
\npublic Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {\n    // 1. get or create a transaction\n    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\n    // 2. begin transaction\n    try {\n        tx.begin(business.timeout(), business.name());\n    } catch (TransactionException txe) {\n        throw new TransactionalExecutor.ExecutionException(tx, txe,\n            TransactionalExecutor.Code.BeginFailure);\n    }\n    Object rs = null;\n    try {\n        // Do Your Business\n        rs = business.execute();\n    } catch (Throwable ex) {\n        // 3. any business exception, rollback.\n        try {\n            tx.rollback();\n            // 3.1 Successfully rolled back\n            throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);\n        } catch (TransactionException txe) {\n            // 3.2 Failed to rollback\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.RollbackFailure, ex);\n        }\n    }\n    // 4. everything is fine, commit.\n    try {\n        tx.commit();\n    } catch (TransactionException txe) {\n        // 4.1 Failed to commit\n        throw new TransactionalExecutor.ExecutionException(tx, txe,\n            TransactionalExecutor.Code.CommitFailure);\n    }\n    return rs;\n}\n
\n

更详细的实现在【TM】模块中被分成了两个Class实现,如下:

\n

DefaultGlobalTransaction :全局事务具体的开启,提交、回滚动作

\n

DefaultTransactionManager :负责使用TmRpcClient向TC控制中心发送指令,如开启全局事务(GlobalBeginRequest)、提交(GlobalCommitRequest)、回滚(GlobalRollbackRequest)、查询状态(GlobalStatusRequest)等。

\n

以上是TM模块核心内容点,TM模块完成全局事务开启后,接下来就看看全局事务ID(xid)是如何传递、RM组件是如何介入的。

\n

【dubbo】全局事务xid的传递

\n

首先是xid的传递,目前已经实现了dubbo框架下微服务架构中的传递,其他的像spring cloud和motan等想要实现也很容易,通过一般RPC通讯框架都有的filter机制,将xid从全局事务的发起节点传递到协同服务节点,协同服务节点接收到后绑定到当前线程上下文环境中,用于在分支事务执行sql时判断是否加入全局事务。fescar的实现见【dubbo】模块,如下:

\n
@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)\npublic class TransactionPropagationFilter implements Filter {\n\n    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);\n\n    @Override\n    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {\n        String xid = RootContext.getXID();\n        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID);\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"xid in RootContext\\[\" + xid + \"\\] xid in RpcContext\\[\" + rpcXid + \"\\]\");\n        }\n        boolean bind = false;\n        if (xid != null) {\n            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);\n        } else {\n            if (rpcXid != null) {\n                RootContext.bind(rpcXid);\n                bind = true;\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"bind\\[\" + rpcXid + \"\\] to RootContext\");\n                }\n            }\n        }\n        try {\n            return invoker.invoke(invocation);\n\n        } finally {\n            if (bind) {\n                String unbindXid = RootContext.unbind();\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"unbind\\[\" + unbindXid + \"\\] from RootContext\");\n                }\n                if (!rpcXid.equalsIgnoreCase(unbindXid)) {\n                    LOGGER.warn(\"xid in change during RPC from \" + rpcXid + \" to \" + unbindXid);\n                    if (unbindXid != null) {\n                        RootContext.bind(unbindXid);\n                        LOGGER.warn(\"bind \\[\" + unbindXid + \"\\] back to RootContext\");\n                    }\n                }\n            }\n        }\n    }\n}\n
\n

上面代码中rpcXid不为空时,就加入到了RootContext的ContextCore中,这里稍微深入讲下。ContextCore是一个可扩展实现的接口,目前默认的实现是ThreadLocalContextCore,基于ThreadLocal来保存维护当前的xid。这里fescar提供了可扩展的机制,实现在【common】模块中,通过一个自定义的服务加载器EnhancedServiceLoader加载需要扩展的服务类,这样只需要在扩展类上加@LoadLevel注解,并通过order属性声明更高的优先级,就可以达到扩展实现的目的。
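为便于理解 ThreadLocalContextCore 的思路,这里给出一个基于 ThreadLocal 的上下文保存的极简示意(非 fescar 源码,类名与方法签名均为假设):

```java
import java.util.HashMap;
import java.util.Map;

// 概念示意:以 ThreadLocal<Map> 为底座的键值容器,xid 绑定在当前线程,跨线程不可见
public class ThreadLocalContextSketch {

    private static final ThreadLocal<Map<String, String>> CONTEXT =
            ThreadLocal.withInitial(HashMap::new);

    public static String put(String key, String value) {
        return CONTEXT.get().put(key, value);
    }

    public static String get(String key) {
        return CONTEXT.get().get(key);
    }

    public static String remove(String key) {
        return CONTEXT.get().remove(key);
    }

    public static void main(String[] args) {
        put("TX_XID", "192.168.7.77:8091:3958193"); // xid 值仅为示例
        System.out.println(get("TX_XID"));
        remove("TX_XID");
    }
}
```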

\n

【RM】模块本地资源管理的介入

\n

fescar针对本地事务相关的接口,通过代理机制都实现了一遍代理类,如数据源(DataSourceProxy)、ConnectionProxy、StatementProxy等。这个在配置文件中也可以看出来,也就是说,我们要使用fescar分布式事务,一定要配置fescar提供的代理数据源。如:

\n

\"\"

\n

配置好代理数据源后,从DataSourceProxy出发,本地针对数据库的所有操作过程我们就可以随意控制了。从上面xid传递,已经知道了xid被保存在RootContext中了,那么请看下面的代码,就非常清楚了:

\n

首先看StatementProxy的一段代码

\n

\"\"

\n

再看ExecuteTemplate中的代码

\n

\"\"

\n

和【TM】模块中的事务管理模板类TransactionalTemplate类似,这里非常关键的代理逻辑也被封装在了ExecuteTemplate模板类中。因为有了重写Statement的StatementProxy实现,在执行原JDBC的executeUpdate方法时,会调用到ExecuteTemplate的execute逻辑。在sql真正执行前,会判断RootContext当前上下文中是否包含xid,也就是判断当前是否是全局分布式事务。如果不是,就直接使用本地事务;如果是,这里RM就会增加一些分布式事务相关的逻辑了。这里根据sql的不同类型,fescar封装了五个不同的执行器来处理,分别是UpdateExecutor、DeleteExecutor、InsertExecutor、SelectForUpdateExecutor、PlainExecutor,结构如下图:

\n

\"\"

\n

PlainExecutor:

\n

原生的JDBC接口实现,未做任何处理,提供给全局事务中的普通的select查询使用

\n

UpdateExecutor、DeleteExecutor、InsertExecutor:

\n

三个DML增删改执行器实现,主要在sql执行的前后对sql语句进行了解析,实现了如下两个抽象接口方法:

\n
protected abstract TableRecords beforeImage() throws SQLException;\n\nprotected abstract TableRecords afterImage(TableRecords beforeImage) throws SQLException;\n
\n

在这个过程中通过解析sql生成了提供回滚操作的undo_log日志,日志目前是保存在mysql中的,和业务sql操作共用同一个事务。表的结构如下:

\n

\"\"

\n

rollback_info保存的undo_log详细信息,是longblob类型的,结构如下:

\n
{\n    \"branchId\":3958194,\n    \"sqlUndoLogs\":[\n        {\n            \"afterImage\":{\n                \"rows\":[\n                    {\n                        \"fields\":[\n                            {\n                                \"keyType\":\"PrimaryKey\",\n                                \"name\":\"ID\",\n                                \"type\":4,\n                                \"value\":10\n                            },\n                            {\n                                \"keyType\":\"NULL\",\n                                \"name\":\"COUNT\",\n                                \"type\":4,\n                                \"value\":98\n                            }\n                        ]\n                    }\n                ],\n                \"tableName\":\"storage_tbl\"\n            },\n            \"beforeImage\":{\n                \"rows\":[\n                    {\n                        \"fields\":[\n                            {\n                                \"keyType\":\"PrimaryKey\",\n                                \"name\":\"ID\",\n                                \"type\":4,\n                                \"value\":10\n                            },\n                            {\n                                \"keyType\":\"NULL\",\n                                \"name\":\"COUNT\",\n                                \"type\":4,\n                                \"value\":100\n                            }\n                        ]\n                    }\n                ],\n                \"tableName\":\"storage_tbl\"\n            },\n            \"sqlType\":\"UPDATE\",\n            \"tableName\":\"storage_tbl\"\n        }\n    ],\n    \"xid\":\"192.168.7.77:8091:3958193\"\n}\n\n\n
\n

这里贴的是一个update的操作,undo_log记录得非常详细,通过全局事务xid关联branchId,记录数据操作的表名、操作字段名,以及sql执行前后的记录值,如这个记录,表名=storage_tbl,sql执行前ID=10,count=100,sql执行后id=10,count=98。如果整个全局事务失败,需要回滚的时候就可以生成:

\n
update storage_tbl set count = 100 where id = 10;\n
\n

这样的回滚sql语句并执行回滚。
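下面用一段极简的示意代码说明“根据 beforeImage 拼出回滚 UPDATE 语句”的思路(非 fescar 源码,真实实现还需处理字段类型、转义、主键定位以及防脏写校验等):

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.StringJoiner;

public class UndoSqlSketch {

    // 根据 before 镜像中的字段值拼出回滚 UPDATE 语句(仅演示思路)
    static String buildRollbackUpdate(String tableName, String pkName, Object pkValue,
                                      Map<String, Object> beforeImageFields) {
        StringJoiner setClause = new StringJoiner(", ");
        beforeImageFields.forEach((name, value) -> setClause.add(name + " = " + value));
        return "update " + tableName + " set " + setClause + " where " + pkName + " = " + pkValue;
    }

    public static void main(String[] args) {
        Map<String, Object> before = new LinkedHashMap<>();
        before.put("count", 100);
        // 输出:update storage_tbl set count = 100 where id = 10
        System.out.println(buildRollbackUpdate("storage_tbl", "id", 10, before));
    }
}
```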

\n

SelectForUpdateExecutor:

\n

fescar的AT模式在本地事务之上默认支持读未提交的隔离级别,但是通过SelectForUpdateExecutor执行器,可以支持读已提交的隔离级别。代码如:

\n
@Override\npublic Object doExecute(Object... args) throws Throwable {\n    SQLSelectRecognizer recognizer = (SQLSelectRecognizer) sqlRecognizer;\n\n    Connection conn = statementProxy.getConnection();\n    ResultSet rs = null;\n    Savepoint sp = null;\n    LockRetryController lockRetryController = new LockRetryController();\n    boolean originalAutoCommit = conn.getAutoCommit();\n\n    StringBuffer selectSQLAppender = new StringBuffer(\"SELECT \");\n    selectSQLAppender.append(getTableMeta().getPkName());\n    selectSQLAppender.append(\" FROM \" + getTableMeta().getTableName());\n    String whereCondition = null;\n    ArrayList<Object> paramAppender = new ArrayList<>();\n    if (statementProxy instanceof ParametersHolder) {\n        whereCondition = recognizer.getWhereCondition((ParametersHolder) statementProxy, paramAppender);\n    } else {\n        whereCondition = recognizer.getWhereCondition();\n    }\n    if (!StringUtils.isEmpty(whereCondition)) {\n        selectSQLAppender.append(\" WHERE \" + whereCondition);\n    }\n    selectSQLAppender.append(\" FOR UPDATE\");\n    String selectPKSQL = selectSQLAppender.toString();\n\n    try {\n        if (originalAutoCommit) {\n            conn.setAutoCommit(false);\n        }\n        sp = conn.setSavepoint();\n        rs = statementCallback.execute(statementProxy.getTargetStatement(), args);\n\n        while (true) {\n            // Try to get global lock of those rows selected\n            Statement stPK = null;\n            PreparedStatement pstPK = null;\n            ResultSet rsPK = null;\n            try {\n                if (paramAppender.isEmpty()) {\n                    stPK = statementProxy.getConnection().createStatement();\n                    rsPK = stPK.executeQuery(selectPKSQL);\n                } else {\n                    pstPK = statementProxy.getConnection().prepareStatement(selectPKSQL);\n                    for (int i = 0; i < paramAppender.size(); i++) {\n                        pstPK.setObject(i + 1, paramAppender.get(i));\n                    }\n                    rsPK = pstPK.executeQuery();\n                }\n\n                TableRecords selectPKRows = TableRecords.buildRecords(getTableMeta(), rsPK);\n                statementProxy.getConnectionProxy().checkLock(selectPKRows);\n                break;\n\n            } catch (LockConflictException lce) {\n                conn.rollback(sp);\n                lockRetryController.sleep(lce);\n\n            } finally {\n                if (rsPK != null) {\n                    rsPK.close();\n                }\n                if (stPK != null) {\n                    stPK.close();\n                }\n                if (pstPK != null) {\n                    pstPK.close();\n                }\n            }\n        }\n\n    } finally {\n        if (sp != null) {\n            conn.releaseSavepoint(sp);\n        }\n        if (originalAutoCommit) {\n            conn.setAutoCommit(true);\n        }\n    }\n    return rs;\n}\n
\n

关键代码见:

\n
TableRecords selectPKRows = TableRecords.buildRecords(getTableMeta(), rsPK);\nstatementProxy.getConnectionProxy().checkLock(selectPKRows);\n
\n

通过selectPKRows表操作记录拿到lockKeys,然后到TC控制器端查询是否被全局锁定了,如果被锁定了,就重新尝试,直到锁释放返回查询结果。

\n

分支事务的注册和上报

\n

在本地事务提交前,fescar会注册和上报分支事务相关的信息,见ConnectionProxy类的commit部分代码:

\n
@Override\npublic void commit() throws SQLException {\n    if (context.inGlobalTransaction()) {\n        try {\n            register();\n        } catch (TransactionException e) {\n            recognizeLockKeyConflictException(e);\n        }\n\n        try {\n            if (context.hasUndoLog()) { \n                UndoLogManager.flushUndoLogs(this);\n            }\n            targetConnection.commit();\n        } catch (Throwable ex) {\n            report(false);\n            if (ex instanceof SQLException) {\n                throw (SQLException) ex;\n            } else {\n                throw new SQLException(ex);\n            }\n        }\n        report(true);\n        context.reset();\n       \n    } else {\n        targetConnection.commit();\n    }\n}\n
\n

从这段代码我们可以看到,首先是判断了是否处于全局事务中,如果不是,就直接提交了;如果是,就先向TC控制器注册分支事务,为了写隔离,在TC端会涉及到全局锁的获取。然后保存了用于回滚操作的undo_log日志,继而真正提交本地事务,最后向TC控制器上报事务状态。此时,阶段一的本地事务已完成了。

\n

【server】模块协调全局

\n

关于server模块,我们可以聚焦在DefaultCoordinator这个类,这个是AbstractTCInboundHandler控制处理器默认实现。主要实现了全局事务开启,提交,回滚,状态查询,分支事务注册,上报,锁检查等接口,如:

\n

\"\"

\n

回到一开始的TransactionalTemplate,如果整个分布式事务失败需要回滚了,首先是TM向TC发起回滚的指令,然后TC接收到并解析请求后,会路由到默认控制器类的doGlobalRollback方法内,最终在TC控制器端执行的代码如下:

\n
@Override\npublic void doGlobalRollback(GlobalSession globalSession, boolean retrying) throws TransactionException {\n    for (BranchSession branchSession : globalSession.getReverseSortedBranches()) {\n        BranchStatus currentBranchStatus = branchSession.getStatus();\n        if (currentBranchStatus == BranchStatus.PhaseOne_Failed) {\n            continue;\n        }\n        try {\n            BranchStatus branchStatus = resourceManagerInbound.branchRollback(XID.generateXID(branchSession.getTransactionId()), branchSession.getBranchId(),\n                    branchSession.getResourceId(), branchSession.getApplicationData());\n\n            switch (branchStatus) {\n                case PhaseTwo_Rollbacked:\n                    globalSession.removeBranch(branchSession);\n                    LOGGER.error(\"Successfully rolled back branch \" + branchSession);\n                    continue;\n                case PhaseTwo\\_RollbackFailed\\_Unretryable:\n                    GlobalStatus currentStatus = globalSession.getStatus();\n                    if (currentStatus.name().startsWith(\"Timeout\")) {\n                        globalSession.changeStatus(GlobalStatus.TimeoutRollbackFailed);\n                    } else {\n                        globalSession.changeStatus(GlobalStatus.RollbackFailed);\n                    }\n                    globalSession.end();\n                    LOGGER.error(\"Failed to rollback global\\[\" + globalSession.getTransactionId() + \"\\] since branch\\[\" + branchSession.getBranchId() + \"\\] rollback failed\");\n                    return;\n                default:\n                    LOGGER.info(\"Failed to rollback branch \" + branchSession);\n                    if (!retrying) {\n                        queueToRetryRollback(globalSession);\n                    }\n                    return;\n\n            }\n        } catch (Exception ex) {\n            LOGGER.info(\"Exception rollbacking branch \" + branchSession, ex);\n            if (!retrying) {\n                queueToRetryRollback(globalSession);\n                if (ex instanceof TransactionException) {\n                    throw (TransactionException) ex;\n                } else {\n                    throw new TransactionException(ex);\n                }\n            }\n\n        }\n\n    }\n    GlobalStatus currentStatus = globalSession.getStatus();\n    if (currentStatus.name().startsWith(\"Timeout\")) {\n        globalSession.changeStatus(GlobalStatus.TimeoutRollbacked);\n    } else {\n        globalSession.changeStatus(GlobalStatus.Rollbacked);\n    }\n    globalSession.end();\n}\n
\n

如上代码可以看到,回滚时从全局事务会话中迭代每个分支事务,然后通知每个分支事务回滚。分支服务接收到请求后,首先会被路由到RMHandlerAT中的doBranchRollback方法,继而调用了RM中的branchRollback方法,代码如下:

\n
@Override\npublic BranchStatus branchRollback(String xid, long branchId, String resourceId, String applicationData) throws TransactionException {\n    DataSourceProxy dataSourceProxy = get(resourceId);\n    if (dataSourceProxy == null) {\n        throw new ShouldNeverHappenException();\n    }\n    try {\n        UndoLogManager.undo(dataSourceProxy, xid, branchId);\n    } catch (TransactionException te) {\n        if (te.getCode() == TransactionExceptionCode.BranchRollbackFailed_Unretriable) {\n            return BranchStatus.PhaseTwo\\_RollbackFailed\\_Unretryable;\n        } else {\n            return BranchStatus.PhaseTwo\\_RollbackFailed\\_Retryable;\n        }\n    }\n    return BranchStatus.PhaseTwo_Rollbacked;\n}\n
\n

RM分支事务端最后执行的是UndoLogManager的undo方法,通过xid和branchId从数据库查询出回滚日志,完成数据回滚操作,整个过程都是同步完成的。如果全局事务是成功的,TC也会有类似的上述协调过程,只不过是异步地将本次全局事务相关的undo_log清除了而已。至此,就完成了两阶段的提交或回滚,也就完成了完整的全局事务的控制。

\n

结语

\n

如果你看到这里,那么非常感谢你,在繁忙工作之余耐心的花时间来学习。同时,我相信花的时间没白费,完整的浏览理解估计对fescar实现的大致流程了解的十之八九了。本文从构思立题到完成大概耗时1人天左右,博主在这个过程中,对fescar的实现也有了更加深入的了解。由于篇幅原因,并没有面面俱到的对每个实现的细节去深究,如sql是如何解析的等,更多的是在fescar的TXC模型的实现过程的关键点做了详细阐述。本文已校对,但由于个人知识水平及精力有限,文中不免出现错误或理解不当的地方,欢迎指正。

\n

作者简介:

\n

陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构&运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。

\n", - "link": "/zh-cn/blog/seata-analysis-simple.html", - "meta": { - "title": "Fescar分布式事务原理解析探秘", - "author": "陈凯玲", - "keywords": "Fescar、分布式事务", - "date": "2019/02/18" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-at-mode-design.html b/zh-cn/blog/seata-at-mode-design.html deleted file mode 100644 index 4c4d8311..00000000 --- a/zh-cn/blog/seata-at-mode-design.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - - - - 分布式事务中间件 Seata 的设计原理 - - - - -

前言

-

在微服务架构体系下,我们可以按照业务模块分层设计,单独部署,减轻了服务部署压力,也解耦了业务的耦合,避免了应用逐渐变成一个庞然怪物,从而可以轻松扩展,在某些服务出现故障时也不会影响其它服务的正常运行。总之,微服务在业务的高速发展中带给我们越来越多的优势,但是微服务并不是十全十美,因此不能盲目过度滥用,它有很多不足,而且会给系统带来一定的复杂度,其中伴随而来的分布式事务问题,是微服务架构体系下必然需要处理的一个痛点,也是业界一直关注的一个领域,因此也出现了诸如 CAP 和 BASE 等理论。

-

在今年年初,阿里开源了一个分布式事务中间件,起初起名为 Fescar,后改名为 Seata,在它开源之初,我就知道它肯定要火,因为这是一个解决痛点的开源项目,Seata 一开始就是冲着对业务无侵入与高性能方向走,这正是我们对解决分布式事务问题迫切的需求。因为待过的几家公司,用的都是微服务架构,但是在解决分布式事务的问题上都不太优雅,所以我也在一直关注 Seata 的发展,今天就简要说说它的一些设计上的原理,后续我将会对它的各个模块进行深入源码分析,感兴趣的可以持续关注我的公众号或者博客,不要跟丢。

-

分布式事务解决的方案有哪些?

-

目前分布式事务解决的方案主要有对业务无入侵和有入侵的方案,无入侵方案主要有基于数据库 XA 协议的两段式提交(2PC)方案,它的优点是对业务代码无入侵,但是它的缺点也是很明显:必须要求数据库对 XA 协议的支持,且由于 XA 协议自身的特点,它会造成事务资源长时间得不到释放,锁定周期长,而且在应用层上面无法干预,因此它性能很差,它的存在相当于七伤拳那样“伤人七分,损己三分”,因此在互联网项目中并不是很流行这种解决方案。

-

为了弥补这种方案性能低的问题,大佬们又想出了很多种方案来解决,但这无一例外都需要通过在应用层做手脚,即入侵业务的方式,比如很出名的 TCC 方案,基于 TCC 也有很多成熟的框架,如 ByteTCC、tcc-transaction 等,以及基于可靠消息的最终一致性来实现的方案,如 RocketMQ 的事务消息。

-

入侵代码的方案是基于现有情形“迫不得已”才推出的解决方案,实际上它们实现起来非常不优雅,一个事务的调用通常伴随而来的是对该事务接口增加一系列的反向操作,比如 TCC 三段式提交,提交逻辑必然伴随着回滚的逻辑,这样的代码会使得项目非常臃肿,维护成本高。

-

Seata 各模块之间的关系

-

针对上面所说的分布式事务解决方案的痛点,那很显然,我们理想的分布式事务解决方案肯定是性能要好而且要对业务无入侵,业务层上无需关心分布式事务机制的约束,Seata 正是往这个方向发展的,因此它非常值得期待,它将给我们的微服务架构带来质的提升。

-

那 Seata 是怎么做到的呢?下面说说它的各个模块之间的关系。

-

Seata 的设计思路是将一个分布式事务理解成一个全局事务,下面挂了若干个分支事务,而一个分支事务是一个满足 ACID 的本地事务,因此我们可以像操作本地事务一样操作这个分布式事务。

-

Seata 内部定义了 3个模块来处理全局事务和分支事务的关系和处理过程,这三个组件分别是:

-
    -
  • Transaction Coordinator (TC): 事务协调器,维护全局事务的运行状态,负责协调并驱动全局事务的提交或回滚。
  • -
  • Transaction Manager (TM): 控制全局事务的边界,负责开启一个全局事务,并最终发起全局提交或全局回滚的决议。
  • -
  • Resource Manager (RM): 控制分支事务,负责分支注册、状态汇报,并接收事务协调器的指令,驱动分支(本地)事务的提交和回滚。
  • -
-

-

简要说说整个全局事务的执行步骤:

-
    -
  1. TM 向 TC 申请开启一个全局事务,TC 创建全局事务后返回全局唯一的 XID,XID 会在全局事务的上下文中传播;
  2. -
  3. RM 向 TC 注册分支事务,该分支事务归属于拥有相同 XID 的全局事务;
  4. -
  5. TM 向 TC 发起全局提交或回滚;
  6. -
  7. TC 调度 XID 下的分支事务完成提交或者回滚。
  8. -
-

与 XA 方案有什么不同?

-

Seata 的事务提交方式跟 XA 协议的两段式提交在总体上来说基本是一致的,那它们之间有什么不同呢?

-

我们都知道 XA 协议它依赖的是数据库层面来保障事务的一致性,也即是说 XA 的各个分支事务是在数据库层面上驱动的,由于 XA 的各个分支事务需要有 XA 的驱动程序,一方面会导致数据库与 XA 驱动耦合,另一方面它会导致各个分支的事务资源锁定周期长,这也是它没有在互联网公司流行的重要因素。

-

基于 XA 协议以上的问题,Seata 另辟蹊径,既然在依赖数据库层会导致这么多问题,那我就从应用层做手脚,这还得从 Seata 的 RM 模块说起,前面也说过 RM 的主要作用了,其实 RM 在内部做了对数据库操作的代理层,如下:

-

-

Seata 在数据源做了一层代理层,所以我们使用 Seata 时,我们使用的数据源实际上用的是 Seata 自带的数据源代理 DataSourceProxy,Seata 在这层代理中加入了很多逻辑,主要是解析 SQL,把业务数据在更新前后的数据镜像组织成回滚日志,并将 undo log 日志插入 undo_log 表中,保证每条更新数据的业务 sql 都有对应的回滚日志存在。
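为了直观说明“在 JDBC 入口处插入额外逻辑”这件事,下面给出一个用 JDK 动态代理包装 Connection 的概念示意(非 Seata 源码,Seata 实际采用的是 DataSourceProxy、ConnectionProxy、StatementProxy 这样的显式代理类):

```java
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.sql.Connection;

// 概念示意:在 commit 等关键调用点前后插入自定义逻辑
public final class ConnectionInterceptor {

    public static Connection wrap(Connection target) {
        return (Connection) Proxy.newProxyInstance(
                ConnectionInterceptor.class.getClassLoader(),
                new Class<?>[]{Connection.class},
                new InvocationHandler() {
                    @Override
                    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                        if ("commit".equals(method.getName())) {
                            // 本地提交前,正是写 undo log、注册分支事务的切入点
                            System.out.println("before local commit: write undo log / register branch");
                        }
                        return method.invoke(target, args);
                    }
                });
    }
}
```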

-

这样做的好处就是,本地事务执行完可以立即释放本地事务锁定的资源,然后向 TC 上报分支状态。当 TM 决议全局提交时,就不需要同步协调处理了,TC 会异步调度各个 RM 分支事务删除对应的 undo log 日志即可,这个步骤非常快速地可以完成;当 TM 决议全局回滚时,RM 收到 TC 发送的回滚请求,RM 通过 XID 找到对应的 undo log 回滚日志,然后执行回滚日志完成回滚操作。

-

-

如上图所示,XA 方案的 RM 是放在数据库层的,它依赖了数据库的 XA 驱动程序。

-

-

如上图所示,Seata 的 RM 实际上是以中间件的形式放在应用层,不用依赖数据库对协议的支持,完全剥离了分布式事务方案对数据库在协议支持上的要求。

-

分支事务如何提交和回滚?

-

下面详细说说分支事务是如何提交和回滚的:

-
    -
  • 第一阶段:
  • -
-

分支事务利用 RM 模块中对 JDBC 数据源的代理,加入了若干流程,对业务 SQL 进行解析,把业务数据在更新前后的数据镜像组织成回滚日志,并生成 undo log 日志,以及对全局事务锁的检查、分支事务的注册等,利用本地事务 ACID 特性,将业务 SQL 和 undo log 写入同一个事务中一同提交到数据库中,保证业务 SQL 必定存在相应的回滚日志,最后将分支事务状态向 TC 进行上报。
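下面是一段概念示意代码(非 Seata 源码),演示“业务 SQL 与 undo log 写入同一个本地事务”这一点;其中表名、列名均为简化假设,真实 undo_log 表结构以官方建表脚本为准:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class PhaseOneSketch {

    public static void updateWithUndoLog(Connection conn, String xid, long branchId,
                                         byte[] rollbackInfo) throws SQLException {
        boolean originalAutoCommit = conn.getAutoCommit();
        try {
            conn.setAutoCommit(false);
            // 1. 执行业务 SQL(执行前后各取一次数据镜像的逻辑此处省略)
            try (PreparedStatement ps = conn.prepareStatement(
                    "update account_tbl set money = money - 400 where user_id = 'U100'")) {
                ps.executeUpdate();
            }
            // 2. 把回滚日志写入同一个库的 undo_log 表(列为简化示意)
            try (PreparedStatement ps = conn.prepareStatement(
                    "insert into undo_log(xid, branch_id, rollback_info) values (?, ?, ?)")) {
                ps.setString(1, xid);
                ps.setLong(2, branchId);
                ps.setBytes(3, rollbackInfo);
                ps.executeUpdate();
            }
            // 3. 一起提交:业务数据与回滚日志要么同时生效,要么同时失败
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.setAutoCommit(originalAutoCommit);
        }
    }
}
```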

-

-
    -
  • 第二阶段:
  • -
-

TM决议全局提交:

-

当 TM 决议提交时,就不需要同步协调处理了,TC 会异步调度各个 RM 分支事务删除对应的 undo log 日志即可,这个步骤非常快速地可以完成。这个机制对于性能提升非常关键,我们知道正常的业务运行过程中,事务执行的成功率是非常高的,因此可以直接在本地事务中提交,这步对于提升性能非常显著。

-

-

TM决议全局回滚:

-

当 TM 决议回滚时,RM 收到 TC 发送的回滚请求,RM 通过 XID 找到对应的 undo log 回滚日志,然后利用本地事务 ACID 特性,执行回滚日志完成回滚操作并删除 undo log 日志,最后向 TC 进行回滚结果上报。

-

-

业务对以上所有的流程都无感知,业务完全不关心全局事务的具体提交和回滚,而且最重要的一点是 Seata 将两段式提交的同步协调分解到各个分支事务中了,分支事务与普通的本地事务无任何差异,这意味着我们使用 Seata 后,分布式事务就像使用本地事务一样,完全将数据库层的事务协调机制交给了中间件层 Seata 去做了,这样虽然事务协调搬到应用层了,但是依然可以做到对业务的零侵入,从而剥离了分布式事务方案对数据库在协议支持上的要求,且 Seata 在分支事务完成之后直接释放资源,极大减少了分支事务对资源的锁定时间,完美避免了 XA 协议需要同步协调导致资源锁定时间过长的问题。

-

其它方案的补充

-

上面说的其实是 Seata 的默认模式,也叫 AT 模式,它是类似于 XA 方案的两段式提交方案,并且是对业务无侵入,但是这种机制依然是需要依赖数据库本地事务的 ACID 特性,有没有发现,我在上面的图中都强调了必须是支持 ACID 特性的关系型数据库,那么问题就来了,非关系型或者不支持 ACID 的数据库就无法使用 Seata 了,别慌,Seata 现阶段为我们准备了另外一种模式,叫 MT 模式,它是一种对业务有入侵的方案,提交回滚等操作需要我们自行定义,业务逻辑需要被分解为 Prepare/Commit/Rollback 3 部分,形成一个 MT 分支,加入全局事务,它存在的意义是为 Seata 触达更多的场景。
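为帮助理解 MT 分支的形态,这里给出一个假设性的接口示意(并非 Seata 实际 API,接口名、方法签名均为演示),体现 Prepare/Commit/Rollback 三段的拆分方式:

```java
// 假设性示意:MT 模式要求业务自己把一个操作拆成三段,由 TC 在二阶段决议后回调后两段
public interface AccountMtAction {

    /** 一阶段:检查并预留资源,例如冻结金额 */
    boolean prepare(String xid, String userId, int amount);

    /** 二阶段-提交:确认扣减,释放冻结 */
    boolean commit(String xid);

    /** 二阶段-回滚:取消冻结,恢复资源 */
    boolean rollback(String xid);
}
```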

-

-

只不过,它不是 Seata “主打”的模式,它的存在仅仅是作为补充的方案,从以上官方的发展远景就可以看出来,Seata 的目标始终是对业务无入侵的方案。

-

注:本文图片设计参考Seata官方图

-

作者简介:

-

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

-
- - - - - - - diff --git a/zh-cn/blog/seata-at-mode-design.json b/zh-cn/blog/seata-at-mode-design.json deleted file mode 100644 index 223a7709..00000000 --- a/zh-cn/blog/seata-at-mode-design.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-at-mode-design.md", - "__html": "

前言

\n

在微服务架构体系下,我们可以按照业务模块分层设计,单独部署,减轻了服务部署压力,也解耦了业务的耦合,避免了应用逐渐变成一个庞然怪物,从而可以轻松扩展,在某些服务出现故障时也不会影响其它服务的正常运行。总之,微服务在业务的高速发展中带给我们越来越多的优势,但是微服务并不是十全十美,因此不能盲目过度滥用,它有很多不足,而且会给系统带来一定的复杂度,其中伴随而来的分布式事务问题,是微服务架构体系下必然需要处理的一个痛点,也是业界一直关注的一个领域,因此也出现了诸如 CAP 和 BASE 等理论。

\n

在今年年初,阿里开源了一个分布式事务中间件,起初起名为 Fescar,后改名为 Seata,在它开源之初,我就知道它肯定要火,因为这是一个解决痛点的开源项目,Seata 一开始就是冲着对业务无侵入与高性能方向走,这正是我们对解决分布式事务问题迫切的需求。因为待过的几家公司,用的都是微服务架构,但是在解决分布式事务的问题上都不太优雅,所以我也在一直关注 Seata 的发展,今天就简要说说它的一些设计上的原理,后续我将会对它的各个模块进行深入源码分析,感兴趣的可以持续关注我的公众号或者博客,不要跟丢。

\n

分布式事务解决的方案有哪些?

\n

目前分布式事务解决的方案主要有对业务无入侵和有入侵的方案,无入侵方案主要有基于数据库 XA 协议的两段式提交(2PC)方案,它的优点是对业务代码无入侵,但是它的缺点也是很明显:必须要求数据库对 XA 协议的支持,且由于 XA 协议自身的特点,它会造成事务资源长时间得不到释放,锁定周期长,而且在应用层上面无法干预,因此它性能很差,它的存在相当于七伤拳那样“伤人七分,损己三分”,因此在互联网项目中并不是很流行这种解决方案。

\n

为了弥补这种方案性能低的问题,大佬们又想出了很多种方案来解决,但这无一例外都需要通过在应用层做手脚,即入侵业务的方式,比如很出名的 TCC 方案,基于 TCC 也有很多成熟的框架,如 ByteTCC、tcc-transaction 等,以及基于可靠消息的最终一致性来实现的方案,如 RocketMQ 的事务消息。

\n

入侵代码的方案是基于现有情形“迫不得已”才推出的解决方案,实际上它们实现起来非常不优雅,一个事务的调用通常伴随而来的是对该事务接口增加一系列的反向操作,比如 TCC 三段式提交,提交逻辑必然伴随着回滚的逻辑,这样的代码会使得项目非常臃肿,维护成本高。

\n

Seata 各模块之间的关系

\n

针对上面所说的分布式事务解决方案的痛点,那很显然,我们理想的分布式事务解决方案肯定是性能要好而且要对业务无入侵,业务层上无需关心分布式事务机制的约束,Seata 正是往这个方向发展的,因此它非常值得期待,它将给我们的微服务架构带来质的提升。

\n

那 Seata 是怎么做到的呢?下面说说它的各个模块之间的关系。

\n

Seata 的设计思路是将一个分布式事务理解成一个全局事务,下面挂了若干个分支事务,而一个分支事务是一个满足 ACID 的本地事务,因此我们可以像操作本地事务一样操作这个分布式事务。

\n

Seata 内部定义了 3个模块来处理全局事务和分支事务的关系和处理过程,这三个组件分别是:

\n
    \n
  • Transaction Coordinator (TC): 事务协调器,维护全局事务的运行状态,负责协调并驱动全局事务的提交或回滚。
  • \n
  • Transaction Manager (TM): 控制全局事务的边界,负责开启一个全局事务,并最终发起全局提交或全局回滚的决议。
  • \n
  • Resource Manager (RM): 控制分支事务,负责分支注册、状态汇报,并接收事务协调器的指令,驱动分支(本地)事务的提交和回滚。
  • \n
\n

\"\"

\n

简要说说整个全局事务的执行步骤:

\n
    \n
  1. TM 向 TC 申请开启一个全局事务,TC 创建全局事务后返回全局唯一的 XID,XID 会在全局事务的上下文中传播;
  2. \n
  3. RM 向 TC 注册分支事务,该分支事务归属于拥有相同 XID 的全局事务;
  4. \n
  5. TM 向 TC 发起全局提交或回滚;
  6. \n
  7. TC 调度 XID 下的分支事务完成提交或者回滚。
  8. \n
\n

与 XA 方案有什么不同?

\n

Seata 的事务提交方式跟 XA 协议的两段式提交在总体上来说基本是一致的,那它们之间有什么不同呢?

\n

我们都知道 XA 协议它依赖的是数据库层面来保障事务的一致性,也即是说 XA 的各个分支事务是在数据库层面上驱动的,由于 XA 的各个分支事务需要有 XA 的驱动程序,一方面会导致数据库与 XA 驱动耦合,另一方面它会导致各个分支的事务资源锁定周期长,这也是它没有在互联网公司流行的重要因素。

\n

基于 XA 协议以上的问题,Seata 另辟蹊径,既然在依赖数据库层会导致这么多问题,那我就从应用层做手脚,这还得从 Seata 的 RM 模块说起,前面也说过 RM 的主要作用了,其实 RM 在内部做了对数据库操作的代理层,如下:

\n

\"\"

\n

Seata 在数据源做了一层代理层,所以我们使用 Seata 时,我们使用的数据源实际上用的是 Seata 自带的数据源代理 DataSourceProxy,Seata 在这层代理中加入了很多逻辑,主要是解析 SQL,把业务数据在更新前后的数据镜像组织成回滚日志,并将 undo log 日志插入 undo_log 表中,保证每条更新数据的业务 sql 都有对应的回滚日志存在。

\n

这样做的好处就是,本地事务执行完可以立即释放本地事务锁定的资源,然后向 TC 上报分支状态。当 TM 决议全局提交时,就不需要同步协调处理了,TC 会异步调度各个 RM 分支事务删除对应的 undo log 日志即可,这个步骤非常快速地可以完成;当 TM 决议全局回滚时,RM 收到 TC 发送的回滚请求,RM 通过 XID 找到对应的 undo log 回滚日志,然后执行回滚日志完成回滚操作。

\n

\"\"

\n

如上图所示,XA 方案的 RM 是放在数据库层的,它依赖了数据库的 XA 驱动程序。

\n

\"\"

\n

如上图所示,Seata 的 RM 实际上是以中间件的形式放在应用层,不用依赖数据库对协议的支持,完全剥离了分布式事务方案对数据库在协议支持上的要求。

\n

分支事务如何提交和回滚?

\n

下面详细说说分支事务是如何提交和回滚的:

\n
    \n
  • 第一阶段:
  • \n
\n

分支事务利用 RM 模块中对 JDBC 数据源的代理,加入了若干流程,对业务 SQL 进行解析,把业务数据在更新前后的数据镜像组织成回滚日志,并生成 undo log 日志,以及对全局事务锁的检查、分支事务的注册等,利用本地事务 ACID 特性,将业务 SQL 和 undo log 写入同一个事务中一同提交到数据库中,保证业务 SQL 必定存在相应的回滚日志,最后将分支事务状态向 TC 进行上报。

\n

\"\"

\n
    \n
  • 第二阶段:
  • \n
\n

TM决议全局提交:

\n

当 TM 决议提交时,就不需要同步协调处理了,TC 会异步调度各个 RM 分支事务删除对应的 undo log 日志即可,这个步骤非常快速地可以完成。这个机制对于性能提升非常关键,我们知道正常的业务运行过程中,事务执行的成功率是非常高的,因此可以直接在本地事务中提交,这步对于提升性能非常显著。

\n

\"\"

\n

TM决议全局回滚:

\n

当 TM 决议回滚时,RM 收到 TC 发送的回滚请求,RM 通过 XID 找到对应的 undo log 回滚日志,然后利用本地事务 ACID 特性,执行回滚日志完成回滚操作并删除 undo log 日志,最后向 TC 进行回滚结果上报。

\n

\"\"

\n

业务对以上所有的流程都无感知,业务完全不关心全局事务的具体提交和回滚,而且最重要的一点是 Seata 将两段式提交的同步协调分解到各个分支事务中了,分支事务与普通的本地事务无任何差异,这意味着我们使用 Seata 后,分布式事务就像使用本地事务一样,完全将数据库层的事务协调机制交给了中间件层 Seata 去做了,这样虽然事务协调搬到应用层了,但是依然可以做到对业务的零侵入,从而剥离了分布式事务方案对数据库在协议支持上的要求,且 Seata 在分支事务完成之后直接释放资源,极大减少了分支事务对资源的锁定时间,完美避免了 XA 协议需要同步协调导致资源锁定时间过长的问题。

\n

其它方案的补充

\n

上面说的其实是 Seata 的默认模式,也叫 AT 模式,它是类似于 XA 方案的两段式提交方案,并且是对业务无侵入,但是这种机制依然是需要依赖数据库本地事务的 ACID 特性,有没有发现,我在上面的图中都强调了必须是支持 ACID 特性的关系型数据库,那么问题就来了,非关系型或者不支持 ACID 的数据库就无法使用 Seata 了,别慌,Seata 现阶段为我们准备了另外一种模式,叫 MT 模式,它是一种对业务有入侵的方案,提交回滚等操作需要我们自行定义,业务逻辑需要被分解为 Prepare/Commit/Rollback 3 部分,形成一个 MT 分支,加入全局事务,它存在的意义是为 Seata 触达更多的场景。

\n

\"\"

\n

只不过,它不是 Seata “主打”的模式,它的存在仅仅是作为补充的方案,从以上官方的发展远景就可以看出来,Seata 的目标始终是对业务无入侵的方案。

\n

注:本文图片设计参考Seata官方图

\n

作者简介:

\n

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

\n", - "link": "/zh-cn/blog/seata-at-mode-design.html", - "meta": { - "title": "分布式事务中间件 Seata 的设计原理", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式", - "description": "AT 模式设计原理", - "date": "2019/07/11" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-at-mode-start-rm-tm.html b/zh-cn/blog/seata-at-mode-start-rm-tm.html deleted file mode 100644 index 9a817656..00000000 --- a/zh-cn/blog/seata-at-mode-start-rm-tm.html +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - Seata 客户端需要同时启动 RM 和 TM 吗? - - - - -

在分析启动部分源码时,我发现 GlobalTransactionScanner 会同时启动 RM 和 TM client,但根据 Seata 的设计来看,TM 负责全局事务的操作,如果一个服务中不需要开启全局事务,此时是不需要启动 TM client的,也就是说项目中如果没有全局事务注解,此时是不是就不需要初始化 TM client 了,因为不是每个微服务,都需要 GlobalTransactional,它此时仅仅作为一个 RM client 而已。

-

于是我着手将 GlobalTransactionScanner 稍微更改了初始化的规则,由于之前 GlobalTransactionScanner 调用初始化方法是在 InitializingBean 的 afterPropertiesSet() 方法中进行,而 afterPropertiesSet() 仅仅是当前 bean 初始化后被调用,此时无法得知当前 Spring 容器是否有全局事务注解。

-

因此我去掉了 InitializingBean,改成了实现 ApplicationListener,在实例化 bean 的过程中检查是否有 GlobalTransactional 注解的存在,最后在 Spring 容器初始化完成之后再调用 RM 和 TM client 初始化方法,这时候就可以根据项目是否有用到全局事务注解来决定是否启动 TM client 了。
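下面是一个按这个思路写的示意代码(并非该 PR 的实际实现,类名、字段均为假设):容器刷新完成后再初始化客户端,并根据扫描阶段是否发现过全局事务注解来决定是否启动 TM client。

```java
import java.util.concurrent.atomic.AtomicBoolean;

import io.seata.rm.RMClient;
import io.seata.tm.TMClient;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;

public class LazyClientInitializer implements ApplicationListener<ContextRefreshedEvent> {

    private final String applicationId;
    private final String txServiceGroup;
    private final AtomicBoolean initialized = new AtomicBoolean(false);
    // 在包装 bean 时若发现 @GlobalTransactional,则由扫描器把它置为 true
    private volatile boolean globalTransactionalFound;

    public LazyClientInitializer(String applicationId, String txServiceGroup) {
        this.applicationId = applicationId;
        this.txServiceGroup = txServiceGroup;
    }

    public void markGlobalTransactionalFound() {
        this.globalTransactionalFound = true;
    }

    @Override
    public void onApplicationEvent(ContextRefreshedEvent event) {
        if (!initialized.compareAndSet(false, true)) {
            return;
        }
        RMClient.init(applicationId, txServiceGroup);   // RM client 始终需要
        if (globalTransactionalFound) {                 // 按需启动 TM client
            TMClient.init(applicationId, txServiceGroup);
        }
    }
}
```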

-

这里附上 PR 地址:https://github.com/seata/seata/pull/1936

-

随后在 pr 的讨论中得知,目前 Seata 的设计是只有发起方的 TM 才可以发起 GlobalRollbackRequest,RM 只能发送 BranchReport(false) 上报分支状态给 TC 服务端,无法直接发送 GlobalRollbackRequest 进行全局回滚操作。具体的交互逻辑如下:

-

-

那么根据上面的设计模型,自然可以按需启动 TM client 了。

-

但是 Seata 后面的优化迭代中,还需要考虑的一点是:

-

当参与方出现异常时,是否可以直接由参与方的 TM client 发起全局回滚?这也就意味着可以缩短分布式事务的周期时间,尽快释放全局锁让其他数据冲突的事务尽早的获取到锁执行。

-

-

也就是说在一个全局事务当中,只要有一个 RM client 执行本地事务失败了,就直接由当前服务的 TM client 发起全局事务回滚,不必等待发起方的 TM 发起决议回滚的通知了。如果要实现这个优化,那么每个服务就都需要同时启动 TM client 和 RM client。

-

作者简介:

-

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

-
- - - - - - - diff --git a/zh-cn/blog/seata-at-mode-start-rm-tm.json b/zh-cn/blog/seata-at-mode-start-rm-tm.json deleted file mode 100644 index 311eb5d2..00000000 --- a/zh-cn/blog/seata-at-mode-start-rm-tm.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-at-mode-start-rm-tm.md", - "__html": "

在分析启动部分源码时,我发现 GlobalTransactionScanner 会同时启动 RM 和 TM client,但根据 Seata 的设计来看,TM 负责全局事务的操作,如果一个服务中不需要开启全局事务,此时是不需要启动 TM client的,也就是说项目中如果没有全局事务注解,此时是不是就不需要初始化 TM client 了,因为不是每个微服务,都需要 GlobalTransactional,它此时仅仅作为一个 RM client 而已。

\n

于是我着手将 GlobalTransactionScanner 稍微更改了初始化的规则,由于之前 GlobalTransactionScanner 调用初始化方法是在 InitializingBean 的 afterPropertiesSet() 方法中进行,而 afterPropertiesSet() 仅仅是当前 bean 初始化后被调用,此时无法得知当前 Spring 容器是否有全局事务注解。

\n

因此我去掉了 InitializingBean,改成了实现 ApplicationListener,在实例化 bean 的过程中检查是否有 GlobalTransactional 注解的存在,最后在 Spring 容器初始化完成之后再调用 RM 和 TM client 初始化方法,这时候就可以根据项目是否有用到全局事务注解来决定是否启动 TM client 了。

\n

这里附上 PR 地址:https://github.com/seata/seata/pull/1936

\n

随后在 pr 的讨论中得知,目前 Seata 的设计是只有发起方的 TM 才可以发起 GlobalRollbackRequest,RM 只能发送 BranchReport(false) 上报分支状态给 TC 服务端,无法直接发送 GlobalRollbackRequest 进行全局回滚操作。具体的交互逻辑如下:

\n

\"\"

\n

那么根据上面的设计模型,自然可以按需启动 TM client 了。

\n

但是 Seata 后面的优化迭代中,还需要考虑的一点是:

\n

当参与方出现异常时,是否可以直接由参与方的 TM client 发起全局回滚?这也就意味着可以缩短分布式事务的周期时间,尽快释放全局锁让其他数据冲突的事务尽早的获取到锁执行。

\n

\"\"

\n

也就是说在一个全局事务当中,只要有一个 RM client 执行本地事务失败了,就直接由当前服务的 TM client 发起全局事务回滚,不必等待发起方的 TM 发起决议回滚的通知了。如果要实现这个优化,那么每个服务就都需要同时启动 TM client 和 RM client。

\n

作者简介:

\n

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

\n", - "link": "/zh-cn/blog/seata-at-mode-start-rm-tm.html", - "meta": { - "title": "Seata 客户端需要同时启动 RM 和 TM 吗?", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式、RM、TM", - "description": "关于 Seata 后续优化的一个讨论点", - "date": "2019/11/28" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-at-mode-start.html b/zh-cn/blog/seata-at-mode-start.html deleted file mode 100644 index 6cd028b7..00000000 --- a/zh-cn/blog/seata-at-mode-start.html +++ /dev/null @@ -1,395 +0,0 @@ - - - - - - - - - - Seata AT 模式启动源码分析 - - - - -

前言

-

从上一篇文章「分布式事务中间件Seata的设计原理」讲了下 Seata AT 模式的一些设计原理,从中也知道了 AT 模式的三个角色(RM、TM、TC),接下来我会更新 Seata 源码分析系列文章。今天就来分析 Seata AT 模式在启动的时候都做了哪些操作。

-

客户端启动逻辑

-

TM 是负责整个全局事务的管理器,因此一个全局事务是由 TM 开启的,TM 有个全局管理类 GlobalTransaction,结构如下:

-

io.seata.tm.api.GlobalTransaction

-
public interface GlobalTransaction {
-
-  void begin() throws TransactionException;
-
-  void begin(int timeout) throws TransactionException;
-
-  void begin(int timeout, String name) throws TransactionException;
-
-  void commit() throws TransactionException;
-
-  void rollback() throws TransactionException;
-  
-  GlobalStatus getStatus() throws TransactionException;
-  
-  // ...
-}
-
-

可以通过 GlobalTransactionContext 创建一个 GlobalTransaction,然后用 GlobalTransaction 进行全局事务的开启、提交、回滚等操作,因此我们直接用 API 方式使用 Seata AT 模式:

-
//init seata;
-TMClient.init(applicationId, txServiceGroup);
-RMClient.init(applicationId, txServiceGroup);
-//trx
-GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-try {
-  tx.begin(60000, "testBiz");
-  // 事务处理
-  // ...
-  tx.commit();
-} catch (Exception exx) {
-  tx.rollback();
-  throw exx;
-}
-
-

如果每次使用全局事务都这样写,难免会造成代码冗余,我们的项目都是基于 Spring 容器,这时我们可以利用 Spring AOP 的特性,用模板模式把这些冗余代码封装到模版里,参考 Mybatis-spring,它也是做了这么一件事情,那么接下来我们来分析一下基于 Spring 的项目启动 Seata 并注册全局事务时都做了哪些工作。
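在进入源码之前,先给出一个极简的模板方法示意(类名与超时参数均为示意,用到的只是上面已出现的 GlobalTransaction API),后文的 TransactionalTemplate 就是这一思路的完整实现:

```java
import java.util.concurrent.Callable;

import io.seata.tm.api.GlobalTransaction;
import io.seata.tm.api.GlobalTransactionContext;

// 简化示意:把“获取事务-开启-执行业务-提交/回滚”的样板流程收敛到一个模板方法里
public class SimpleTxTemplate {

    public <T> T execute(Callable<T> business) throws Exception {
        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
        tx.begin(60000, "testBiz");
        try {
            T result = business.call();   // 业务逻辑由调用方传入
            tx.commit();
            return result;
        } catch (Exception ex) {
            tx.rollback();                // 任何业务异常触发全局回滚
            throw ex;
        }
    }
}
```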

-

我们开启一个全局事务是在方法上加上 @GlobalTransactional注解,Seata 的 Spring 模块中,有个 GlobalTransactionScanner,它的继承关系如下:

-
public class GlobalTransactionScanner extends AbstractAutoProxyCreator implements InitializingBean, ApplicationContextAware, DisposableBean {
-  // ...
-}
-
-

在基于 Spring 项目的启动过程中,对该类会有如下初始化流程:

-

(图:GlobalTransactionScanner 初始化流程示意)

-

InitializingBean 的 afterPropertiesSet() 方法调用了 initClient() 方法:

-

io.seata.spring.annotation.GlobalTransactionScanner#initClient

-
TMClient.init(applicationId, txServiceGroup);
-RMClient.init(applicationId, txServiceGroup);
-
-

对 TM 和 RM 做了初始化操作。

-
    -
  • TM 初始化
  • -
-

io.seata.tm.TMClient#init

-
public static void init(String applicationId, String transactionServiceGroup) {
-  // 获取 TmRpcClient 实例
-  TmRpcClient tmRpcClient = TmRpcClient.getInstance(applicationId, transactionServiceGroup);
-  // 初始化 TM Client
-  tmRpcClient.init();
-}
-
-

调用 TmRpcClient.getInstance() 方法会获取一个 TM 客户端实例,在获取过程中,会创建 Netty 客户端配置文件对象,以及创建 messageExecutor 线程池,该线程池用于处理各种与服务端的消息交互;在创建 TmRpcClient 实例时,会创建 ClientBootstrap,用于管理 Netty 服务的启停,以及 ClientChannelManager,它专门用于管理 Netty 客户端对象池,Seata 的 Netty 部分配合使用了对象池,后面在分析网络模块时会讲到。

-

io.seata.core.rpc.netty.AbstractRpcRemotingClient#init

-
public void init() {
-  clientBootstrap.start();
-  // 定时尝试连接服务端
-  timerExecutor.scheduleAtFixedRate(new Runnable() {
-    @Override
-    public void run() {
-      clientChannelManager.reconnect(getTransactionServiceGroup());
-    }
-  }, SCHEDULE_INTERVAL_MILLS, SCHEDULE_INTERVAL_MILLS, TimeUnit.SECONDS);
-  mergeSendExecutorService = new ThreadPoolExecutor(MAX_MERGE_SEND_THREAD,
-                                                    MAX_MERGE_SEND_THREAD,
-                                                    KEEP_ALIVE_TIME, TimeUnit.MILLISECONDS,
-                                                    new LinkedBlockingQueue<>(),
-                                                    new NamedThreadFactory(getThreadPrefix(), MAX_MERGE_SEND_THREAD));
-  mergeSendExecutorService.submit(new MergedSendRunnable());
-  super.init();
-}
-
-

调用 TM 客户端 init() 方法,最终会启动 netty 客户端(此时还未真正启动,在对象池被调用时才会被真正启动);开启一个定时任务,定时重新发送 RegisterTMRequest(RM 客户端会发送 RegisterRMRequest)请求尝试连接服务端,具体逻辑是在 NettyClientChannelManager 的 channels 中缓存了客户端 channel,如果此时 channels 不存在或者已过期,那么就会尝试连接服务端以重新获取 channel 并将其缓存到 channels 中;开启一条单独线程,用于处理异步请求发送,这里用得很巧妙,之后在分析网络模块时再具体对其进行分析。

-

io.seata.core.rpc.netty.AbstractRpcRemoting#init

-
public void init() {
-  timerExecutor.scheduleAtFixedRate(new Runnable() {
-    @Override
-    public void run() {
-      for (Map.Entry<Integer, MessageFuture> entry : futures.entrySet()) {
-        if (entry.getValue().isTimeout()) {
-          futures.remove(entry.getKey());
-          entry.getValue().setResultMessage(null);
-          if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("timeout clear future: {}", entry.getValue().getRequestMessage().getBody());
-          }
-        }
-      }
-
-      nowMills = System.currentTimeMillis();
-    }
-  }, TIMEOUT_CHECK_INTERNAL, TIMEOUT_CHECK_INTERNAL, TimeUnit.MILLISECONDS);
-}
-
-

在 AbstractRpcRemoting 的 init 方法中,又开启了一个定时任务,该定时任务主要用于定时清除 futures 中已过期的 future,futures 保存的是发送请求后需要返回结果的 future 对象,该对象有个超时时间,过了超时时间就会自动抛异常,因此需要定时清除已过期的 future 对象。

-
    -
  • RM 初始化
  • -
-

io.seata.rm.RMClient#init

-
public static void init(String applicationId, String transactionServiceGroup) {
-  RmRpcClient rmRpcClient = RmRpcClient.getInstance(applicationId, transactionServiceGroup);
-  rmRpcClient.setResourceManager(DefaultResourceManager.get());
-  rmRpcClient.setClientMessageListener(new RmMessageListener(DefaultRMHandler.get()));
-  rmRpcClient.init();
-}
-
-

RmRpcClient.getInstance 处理逻辑与 TM 大致相同;ResourceManager 是 RM 资源管理器,负责分支事务的注册、提交、上报、以及回滚操作,以及全局锁的查询操作,DefaultResourceManager 会持有当前所有的 RM 资源管理器,进行统一调用处理,而 get() 方法主要是加载当前的资源管理器,主要用了类似 SPI 的机制,进行灵活加载,如下图,Seata 会扫描 META-INF/services/ 目录下的配置类并进行动态加载。
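作为对照,下面给出 JDK 原生 ServiceLoader 的一个最小示例(接口与实现均为演示用的假设代码),EnhancedServiceLoader 在类似思路上增加了 @LoadLevel 排序、选优等能力:

```java
import java.util.ServiceLoader;

public class SpiLoadDemo {

    /** 演示用的扩展点接口 */
    public interface GreetingProvider {
        String greet();
    }

    public static void main(String[] args) {
        // 前提:classpath 下存在文件 META-INF/services/SpiLoadDemo$GreetingProvider,
        // 文件内容是各实现类的全限定名,每行一个
        ServiceLoader<GreetingProvider> loader = ServiceLoader.load(GreetingProvider.class);
        for (GreetingProvider provider : loader) {
            System.out.println("loaded: " + provider.getClass().getName() + " -> " + provider.greet());
        }
    }
}
```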

-

ClientMessageListener 是 RM 消息处理监听器,用于负责处理从 TC 发送过来的指令,并对分支进行分支提交、分支回滚,以及 undo log 删除操作;最后 init 方法跟 TM 逻辑也大体一致;DefaultRMHandler 封装了 RM 分支事务的一些具体操作逻辑。

-

接下来再看看 wrapIfNecessary 方法究竟做了哪些操作。

-

io.seata.spring.annotation.GlobalTransactionScanner#wrapIfNecessary

-
protected Object wrapIfNecessary(Object bean, String beanName, Object cacheKey) {
-  // 判断是否有开启全局事务
-  if (disableGlobalTransaction) {
-    return bean;
-  }
-  try {
-    synchronized (PROXYED_SET) {
-      if (PROXYED_SET.contains(beanName)) {
-        return bean;
-      }
-      interceptor = null;
-      //check TCC proxy
-      if (TCCBeanParserUtils.isTccAutoProxy(bean, beanName, applicationContext)) {
-        //TCC interceptor, proxy bean of sofa:reference/dubbo:reference, and LocalTCC
-        interceptor = new TccActionInterceptor(TCCBeanParserUtils.getRemotingDesc(beanName));
-      } else {
-        Class<?> serviceInterface = SpringProxyUtils.findTargetClass(bean);
-        Class<?>[] interfacesIfJdk = SpringProxyUtils.findInterfaces(bean);
-
-        // 判断 bean 中是否有 GlobalTransactional 和 GlobalLock 注解
-        if (!existsAnnotation(new Class[]{serviceInterface})
-            && !existsAnnotation(interfacesIfJdk)) {
-          return bean;
-        }
-
-        if (interceptor == null) {
-          // 创建代理类
-          interceptor = new GlobalTransactionalInterceptor(failureHandlerHook);
-        }
-      }
-
-      LOGGER.info("Bean[{}] with name [{}] would use interceptor [{}]",
-                  bean.getClass().getName(), beanName, interceptor.getClass().getName());
-      if (!AopUtils.isAopProxy(bean)) {
-        bean = super.wrapIfNecessary(bean, beanName, cacheKey);
-      } else {
-        AdvisedSupport advised = SpringProxyUtils.getAdvisedSupport(bean);
-        // 执行包装目标对象到代理对象  
-        Advisor[] advisor = super.buildAdvisors(beanName, getAdvicesAndAdvisorsForBean(null, null, null));
-        for (Advisor avr : advisor) {
-          advised.addAdvisor(0, avr);
-        }
-      }
-      PROXYED_SET.add(beanName);
-      return bean;
-    }
-  } catch (Exception exx) {
-    throw new RuntimeException(exx);
-  }
-}
-
-

GlobalTransactionScanner 继承了 AbstractAutoProxyCreator,用于对 Spring AOP 支持,从代码中可看出,用GlobalTransactionalInterceptor 代替了被 GlobalTransactional 和 GlobalLock 注解的方法。

-

GlobalTransactionalInterceptor 实现了 MethodInterceptor:

-

io.seata.spring.annotation.GlobalTransactionalInterceptor#invoke

-
public Object invoke(final MethodInvocation methodInvocation) throws Throwable {
-  Class<?> targetClass = methodInvocation.getThis() != null ? AopUtils.getTargetClass(methodInvocation.getThis()) : null;
-  Method specificMethod = ClassUtils.getMostSpecificMethod(methodInvocation.getMethod(), targetClass);
-  final Method method = BridgeMethodResolver.findBridgedMethod(specificMethod);
-
-  final GlobalTransactional globalTransactionalAnnotation = getAnnotation(method, GlobalTransactional.class);
-  final GlobalLock globalLockAnnotation = getAnnotation(method, GlobalLock.class);
-  if (globalTransactionalAnnotation != null) {
-    // 全局事务注解
-    return handleGlobalTransaction(methodInvocation, globalTransactionalAnnotation);
-  } else if (globalLockAnnotation != null) {
-    // 全局锁注解
-    return handleGlobalLock(methodInvocation);
-  } else {
-    return methodInvocation.proceed();
-  }
-}
-
-

以上是代理方法执行的逻辑,其中 handleGlobalTransaction() 方法里面调用了 TransactionalTemplate 模版:

-

io.seata.spring.annotation.GlobalTransactionalInterceptor#handleGlobalTransaction

-
private Object handleGlobalTransaction(final MethodInvocation methodInvocation,
-                                       final GlobalTransactional globalTrxAnno) throws Throwable {
-  try {
-    return transactionalTemplate.execute(new TransactionalExecutor() {
-      @Override
-      public Object execute() throws Throwable {
-        return methodInvocation.proceed();
-      }
-      @Override
-      public TransactionInfo getTransactionInfo() {
-        // ...
-      }
-    });
-  } catch (TransactionalExecutor.ExecutionException e) {
-    // ...
-  }
-}
-
-

handleGlobalTransaction() 方法执行的就是 TransactionalTemplate 模版类的 execute 方法:

-

io.seata.tm.api.TransactionalTemplate#execute

-
public Object execute(TransactionalExecutor business) throws Throwable {
-  // 1. get or create a transaction
-  GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-
-  // 1.1 get transactionInfo
-  TransactionInfo txInfo = business.getTransactionInfo();
-  if (txInfo == null) {
-    throw new ShouldNeverHappenException("transactionInfo does not exist");
-  }
-  try {
-
-    // 2. begin transaction
-    beginTransaction(txInfo, tx);
-
-    Object rs = null;
-    try {
-
-      // Do Your Business
-      rs = business.execute();
-
-    } catch (Throwable ex) {
-
-      // 3.the needed business exception to rollback.
-      completeTransactionAfterThrowing(txInfo,tx,ex);
-      throw ex;
-    }
-
-    // 4. everything is fine, commit.
-    commitTransaction(tx);
-
-    return rs;
-  } finally {
-    //5. clear
-    triggerAfterCompletion();
-    cleanUp();
-  }
-}
-
-

以上是不是有一种似曾相识的感觉?没错,以上就是我们使用 API 时经常写的冗余代码,现在 Spring 通过代理模式,把这些冗余代码都封装到模版里面了,它将那些冗余代码统统封装起来统一流程处理,并不需要你显式写出来了,有兴趣的也可以去看看 Mybatis-spring 的源码,也是写得非常精彩。

-

服务端处理逻辑

-

服务端收到客户端的连接,那当然是将其 channel 也缓存起来,前面也说到客户端会发送 RegisterRMRequest/RegisterTMRequest 请求给服务端,服务端收到后会调用 ServerMessageListener 监听器处理:

-

io.seata.core.rpc.ServerMessageListener

-
public interface ServerMessageListener {
-  // 处理各种事务,如分支注册、分支提交、分支上报、分支回滚等等
-  void onTrxMessage(RpcMessage request, ChannelHandlerContext ctx, ServerMessageSender sender);
-	// 处理 RM 客户端的注册连接
-  void onRegRmMessage(RpcMessage request, ChannelHandlerContext ctx,
-                      ServerMessageSender sender, RegisterCheckAuthHandler checkAuthHandler);
-  // 处理 TM 客户端的注册连接
-  void onRegTmMessage(RpcMessage request, ChannelHandlerContext ctx,
-                      ServerMessageSender sender, RegisterCheckAuthHandler checkAuthHandler);
-  // 服务端与客户端保持心跳
-  void onCheckMessage(RpcMessage request, ChannelHandlerContext ctx, ServerMessageSender sender)
-
-}
-
-

ChannelManager 是服务端 channel 的管理器,服务端每次和客户端通信,都需要从 ChannelManager 中获取客户端对应的 channel,它用于保存 TM 和 RM 客户端 channel,缓存结构如下:

-
/**
- * resourceId -> applicationId -> ip -> port -> RpcContext
- */
-private static final ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer,
-RpcContext>>>>
-  RM_CHANNELS = new ConcurrentHashMap<String, ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer,
-RpcContext>>>>();
-
-/**
- * ip+appname,port
- */
-private static final ConcurrentMap<String, ConcurrentMap<Integer, RpcContext>> TM_CHANNELS
-  = new ConcurrentHashMap<String, ConcurrentMap<Integer, RpcContext>>();
-
-

以上的 Map 结构有点复杂:

-

RM_CHANNELS:

-
    -
  1. resourceId 指的是 RM client 的数据库地址;
  2. -
  3. applicationId 指的是 RM client 的服务 Id,比如 springboot 的配置 spring.application.name=account-service 中的 account-service 即是 applicationId;
  4. -
  5. ip 指的是 RM client 服务地址;
  6. -
  7. port 指的是 RM client 服务地址;
  8. -
  9. RpcContext 保存了本次注册请求的信息。
  10. -
-

TM_CHANNELS:

-
    -
  1. ip+appname:这里的注释应该是写错了,应该是 appname+ip,即 TM_CHANNELS 的 Map 结构第一个 key 为 appname+ip;
  2. -
  3. port:客户端的端口号。
  4. -
-
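针对上面这种多层嵌套的缓存结构,下面给出一个用 computeIfAbsent 逐层建桶的可运行示意(value 用 String 代替 RpcContext,资源地址与端口均为演示数据):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// 概念示意:与 RM_CHANNELS(resourceId -> applicationId -> ip -> port -> 上下文)同构的维护方式
public class NestedMapDemo {

    static final ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer, String>>>>
            RM_CHANNELS = new ConcurrentHashMap<>();

    public static void register(String resourceId, String applicationId, String ip, int port, String context) {
        RM_CHANNELS.computeIfAbsent(resourceId, k -> new ConcurrentHashMap<>())
                .computeIfAbsent(applicationId, k -> new ConcurrentHashMap<>())
                .computeIfAbsent(ip, k -> new ConcurrentHashMap<>())
                .put(port, context);
    }

    public static void main(String[] args) {
        register("jdbc:mysql://127.0.0.1:3306/account", "account-service", "192.168.0.10", 63353, "rpcContext-1");
        System.out.println(RM_CHANNELS);
    }
}
```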

以下是 RM Client 注册逻辑:

-

io.seata.core.rpc.ChannelManager#registerRMChannel

-
public static void registerRMChannel(RegisterRMRequest resourceManagerRequest, Channel channel)
-  throws IncompatibleVersionException {
-  Version.checkVersion(resourceManagerRequest.getVersion());
-  // 将 ResourceIds 数据库连接连接信息放入一个set中
-  Set<String> dbkeySet = dbKeytoSet(resourceManagerRequest.getResourceIds());
-  RpcContext rpcContext;
-  // 从缓存中判断是否有该channel信息
-  if (!IDENTIFIED_CHANNELS.containsKey(channel)) {
-    // 根据请求注册信息,构建 rpcContext
-    rpcContext = buildChannelHolder(NettyPoolKey.TransactionRole.RMROLE, resourceManagerRequest.getVersion(),
-                                    resourceManagerRequest.getApplicationId(), resourceManagerRequest.getTransactionServiceGroup(),
-                                    resourceManagerRequest.getResourceIds(), channel);
-    // 将 rpcContext 放入缓存中
-    rpcContext.holdInIdentifiedChannels(IDENTIFIED_CHANNELS);
-  } else {
-    rpcContext = IDENTIFIED_CHANNELS.get(channel);
-    rpcContext.addResources(dbkeySet);
-  }
-  if (null == dbkeySet || dbkeySet.isEmpty()) { return; }
-  for (String resourceId : dbkeySet) {
-    String clientIp;
-    // 将请求信息存入 RM_CHANNELS 中,这里用了 java8 的 computeIfAbsent 方法操作
-    ConcurrentMap<Integer, RpcContext> portMap = RM_CHANNELS.computeIfAbsent(resourceId, resourceIdKey -> new ConcurrentHashMap<>())
-      .computeIfAbsent(resourceManagerRequest.getApplicationId(), applicationId -> new ConcurrentHashMap<>())
-      .computeIfAbsent(clientIp = getClientIpFromChannel(channel), clientIpKey -> new ConcurrentHashMap<>());
-		// 将当前 rpcContext 放入 portMap 中
-    rpcContext.holdInResourceManagerChannels(resourceId, portMap);
-    updateChannelsResource(resourceId, clientIp, resourceManagerRequest.getApplicationId());
-  }
-}
-
-

从以上代码逻辑能够看出,注册 RM client 主要是将注册请求信息,放入 RM_CHANNELS 缓存中,同时还会从 IDENTIFIED_CHANNELS 中判断本次请求的 channel 是否已验证过,IDENTIFIED_CHANNELS 的结构如下:

-
private static final ConcurrentMap<Channel, RpcContext> IDENTIFIED_CHANNELS
-  = new ConcurrentHashMap<>();
-
-

IDENTIFIED_CHANNELS 包含了所有 TM 和 RM 已注册的 channel。

-

以下是 TM 注册逻辑:

-

io.seata.core.rpc.ChannelManager#registerTMChannel

-
public static void registerTMChannel(RegisterTMRequest request, Channel channel)
-  throws IncompatibleVersionException {
-  Version.checkVersion(request.getVersion());
-  // 根据请求注册信息,构建 RpcContext
-  RpcContext rpcContext = buildChannelHolder(NettyPoolKey.TransactionRole.TMROLE, request.getVersion(),
-                                             request.getApplicationId(),
-                                             request.getTransactionServiceGroup(),
-                                             null, channel);
-  // 将 RpcContext 放入 IDENTIFIED_CHANNELS 缓存中
-  rpcContext.holdInIdentifiedChannels(IDENTIFIED_CHANNELS);
-  // account-service:127.0.0.1:63353
-  String clientIdentified = rpcContext.getApplicationId() + Constants.CLIENT_ID_SPLIT_CHAR
-    + getClientIpFromChannel(channel);
-  // 将请求信息存入 TM_CHANNELS 缓存中
-  TM_CHANNELS.putIfAbsent(clientIdentified, new ConcurrentHashMap<Integer, RpcContext>());
-  // 将上一步创建好的get出来,之后再将rpcContext放入这个map的value中
-  ConcurrentMap<Integer, RpcContext> clientIdentifiedMap = TM_CHANNELS.get(clientIdentified);
-  rpcContext.holdInClientChannels(clientIdentifiedMap);
-}
-
-

TM client 的注册大体类似,把本次注册的信息放入对应的缓存中保存,但比 RM client 的注册逻辑简单一些,主要是 RM client 会涉及分支事务资源的信息,需要注册的信息也会比 TM client 多。

-

以上源码分析基于 0.9.0 版本。

-

作者简介

-

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

-
- - - - - - - diff --git a/zh-cn/blog/seata-at-mode-start.json b/zh-cn/blog/seata-at-mode-start.json deleted file mode 100644 index 7a1efee1..00000000 --- a/zh-cn/blog/seata-at-mode-start.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-at-mode-start.md", - "__html": "

前言

\n

从上一篇文章「分布式事务中间件Seata的设计原理」讲了下 Seata AT 模式的一些设计原理,从中也知道了 AT 模式的三个角色(RM、TM、TC),接下来我会更新 Seata 源码分析系列文章。今天就来分析 Seata AT 模式在启动的时候都做了哪些操作。

\n

客户端启动逻辑

\n

TM 是负责整个全局事务的管理器,因此一个全局事务是由 TM 开启的,TM 有个全局管理类 GlobalTransaction,结构如下:

\n

io.seata.tm.api.GlobalTransaction

\n
public interface GlobalTransaction {\n\n  void begin() throws TransactionException;\n\n  void begin(int timeout) throws TransactionException;\n\n  void begin(int timeout, String name) throws TransactionException;\n\n  void commit() throws TransactionException;\n\n  void rollback() throws TransactionException;\n  \n  GlobalStatus getStatus() throws TransactionException;\n  \n  // ...\n}\n
\n

可以通过 GlobalTransactionContext 创建一个 GlobalTransaction,然后用 GlobalTransaction 进行全局事务的开启、提交、回滚等操作,因此我们直接用 API 方式使用 Seata AT 模式:

\n
//init seata;\nTMClient.init(applicationId, txServiceGroup);\nRMClient.init(applicationId, txServiceGroup);\n//trx\nGlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\ntry {\n  tx.begin(60000, \"testBiz\");\n  // 事务处理\n  // ...\n  tx.commit();\n} catch (Exception exx) {\n  tx.rollback();\n  throw exx;\n}\n
\n

如果每次使用全局事务都这样写,难免会造成代码冗余,我们的项目都是基于 Spring 容器,这时我们可以利用 Spring AOP 的特性,用模板模式把这些冗余代码封装到模版里,参考 Mybatis-spring,它也是做了这么一件事情,那么接下来我们来分析一下基于 Spring 的项目启动 Seata 并注册全局事务时都做了哪些工作。

\n

我们开启一个全局事务是在方法上加上 @GlobalTransactional注解,Seata 的 Spring 模块中,有个 GlobalTransactionScanner,它的继承关系如下:

\n
public class GlobalTransactionScanner extends AbstractAutoProxyCreator implements InitializingBean, ApplicationContextAware, DisposableBean {\n  // ...\n}\n
\n

在基于 Spring 项目的启动过程中,对该类会有如下初始化流程:

\n

\"image-20191124155455309\"

\n

InitializingBean 的 afterPropertiesSet() 方法调用了 initClient() 方法:

\n

io.seata.spring.annotation.GlobalTransactionScanner#initClient

\n
TMClient.init(applicationId, txServiceGroup);\nRMClient.init(applicationId, txServiceGroup);\n
\n

对 TM 和 RM 做了初始化操作。

\n
    \n
  • TM 初始化
  • \n
\n

io.seata.tm.TMClient#init

\n
public static void init(String applicationId, String transactionServiceGroup) {\n  // 获取 TmRpcClient 实例\n  TmRpcClient tmRpcClient = TmRpcClient.getInstance(applicationId, transactionServiceGroup);\n  // 初始化 TM Client\n  tmRpcClient.init();\n}\n
\n

调用 TmRpcClient.getInstance() 方法会获取一个 TM 客户端实例,在获取过程中,会创建 Netty 客户端配置文件对象,以及创建 messageExecutor 线程池,该线程池用于处理各种与服务端的消息交互;在创建 TmRpcClient 实例时,会创建 ClientBootstrap,用于管理 Netty 服务的启停,以及 ClientChannelManager,它专门用于管理 Netty 客户端对象池,Seata 的 Netty 部分配合使用了对象池,后面在分析网络模块时会讲到。

\n

io.seata.core.rpc.netty.AbstractRpcRemotingClient#init

\n
public void init() {\n  clientBootstrap.start();\n  // 定时尝试连接服务端\n  timerExecutor.scheduleAtFixedRate(new Runnable() {\n    @Override\n    public void run() {\n      clientChannelManager.reconnect(getTransactionServiceGroup());\n    }\n  }, SCHEDULE_INTERVAL_MILLS, SCHEDULE_INTERVAL_MILLS, TimeUnit.SECONDS);\n  mergeSendExecutorService = new ThreadPoolExecutor(MAX_MERGE_SEND_THREAD,\n                                                    MAX_MERGE_SEND_THREAD,\n                                                    KEEP_ALIVE_TIME, TimeUnit.MILLISECONDS,\n                                                    new LinkedBlockingQueue<>(),\n                                                    new NamedThreadFactory(getThreadPrefix(), MAX_MERGE_SEND_THREAD));\n  mergeSendExecutorService.submit(new MergedSendRunnable());\n  super.init();\n}\n
\n

调用 TM 客户端 init() 方法,最终会启动 netty 客户端(此时还未真正启动,在对象池被调用时才会被真正启动);开启一个定时任务,定时重新发送 RegisterTMRequest(RM 客户端会发送 RegisterRMRequest)请求尝试连接服务端,具体逻辑是在 NettyClientChannelManager 的 channels 中缓存了客户端 channel,如果此时 channels 不存在或者已过期,那么就会尝试连接服务端以重新获取 channel 并将其缓存到 channels 中;开启一条单独线程,用于处理异步请求发送,这里用得很巧妙,之后在分析网络模块时再具体对其进行分析。

\n

io.seata.core.rpc.netty.AbstractRpcRemoting#init

\n
public void init() {\n  timerExecutor.scheduleAtFixedRate(new Runnable() {\n    @Override\n    public void run() {\n      for (Map.Entry<Integer, MessageFuture> entry : futures.entrySet()) {\n        if (entry.getValue().isTimeout()) {\n          futures.remove(entry.getKey());\n          entry.getValue().setResultMessage(null);\n          if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"timeout clear future: {}\", entry.getValue().getRequestMessage().getBody());\n          }\n        }\n      }\n\n      nowMills = System.currentTimeMillis();\n    }\n  }, TIMEOUT_CHECK_INTERNAL, TIMEOUT_CHECK_INTERNAL, TimeUnit.MILLISECONDS);\n}\n
\n

在 AbstractRpcRemoting 的 init 方法中,又开启了一个定时任务,该定时任务主要用于定时清除 futures 中已过期的 future,futures 保存的是发送请求后需要返回结果的 future 对象,该对象有个超时时间,过了超时时间就会自动抛异常,因此需要定时清除已过期的 future 对象。

\n
    \n
  • RM 初始化
  • \n
\n

io.seata.rm.RMClient#init

\n
public static void init(String applicationId, String transactionServiceGroup) {\n  RmRpcClient rmRpcClient = RmRpcClient.getInstance(applicationId, transactionServiceGroup);\n  rmRpcClient.setResourceManager(DefaultResourceManager.get());\n  rmRpcClient.setClientMessageListener(new RmMessageListener(DefaultRMHandler.get()));\n  rmRpcClient.init();\n}\n
\n

RmRpcClient.getInstance 处理逻辑与 TM 大致相同;ResourceManager 是 RM 资源管理器,负责分支事务的注册、提交、上报、以及回滚操作,以及全局锁的查询操作,DefaultResourceManager 会持有当前所有的 RM 资源管理器,进行统一调用处理,而 get() 方法主要是加载当前的资源管理器,主要用了类似 SPI 的机制,进行灵活加载,如下图,Seata 会扫描 META-INF/services/ 目录下的配置类并进行动态加载。

\n

ClientMessageListener 是 RM 消息处理监听器,用于负责处理从 TC 发送过来的指令,并对分支进行分支提交、分支回滚,以及 undo log 删除操作;最后 init 方法跟 TM 逻辑也大体一致;DefaultRMHandler 封装了 RM 分支事务的一些具体操作逻辑。

\n

接下来再看看 wrapIfNecessary 方法究竟做了哪些操作。

\n

io.seata.spring.annotation.GlobalTransactionScanner#wrapIfNecessary

\n
protected Object wrapIfNecessary(Object bean, String beanName, Object cacheKey) {\n  // 判断是否有开启全局事务\n  if (disableGlobalTransaction) {\n    return bean;\n  }\n  try {\n    synchronized (PROXYED_SET) {\n      if (PROXYED_SET.contains(beanName)) {\n        return bean;\n      }\n      interceptor = null;\n      //check TCC proxy\n      if (TCCBeanParserUtils.isTccAutoProxy(bean, beanName, applicationContext)) {\n        //TCC interceptor, proxy bean of sofa:reference/dubbo:reference, and LocalTCC\n        interceptor = new TccActionInterceptor(TCCBeanParserUtils.getRemotingDesc(beanName));\n      } else {\n        Class<?> serviceInterface = SpringProxyUtils.findTargetClass(bean);\n        Class<?>[] interfacesIfJdk = SpringProxyUtils.findInterfaces(bean);\n\n        // 判断 bean 中是否有 GlobalTransactional 和 GlobalLock 注解\n        if (!existsAnnotation(new Class[]{serviceInterface})\n            && !existsAnnotation(interfacesIfJdk)) {\n          return bean;\n        }\n\n        if (interceptor == null) {\n          // 创建代理类\n          interceptor = new GlobalTransactionalInterceptor(failureHandlerHook);\n        }\n      }\n\n      LOGGER.info(\"Bean[{}] with name [{}] would use interceptor [{}]\",\n                  bean.getClass().getName(), beanName, interceptor.getClass().getName());\n      if (!AopUtils.isAopProxy(bean)) {\n        bean = super.wrapIfNecessary(bean, beanName, cacheKey);\n      } else {\n        AdvisedSupport advised = SpringProxyUtils.getAdvisedSupport(bean);\n        // 执行包装目标对象到代理对象  \n        Advisor[] advisor = super.buildAdvisors(beanName, getAdvicesAndAdvisorsForBean(null, null, null));\n        for (Advisor avr : advisor) {\n          advised.addAdvisor(0, avr);\n        }\n      }\n      PROXYED_SET.add(beanName);\n      return bean;\n    }\n  } catch (Exception exx) {\n    throw new RuntimeException(exx);\n  }\n}\n
\n

GlobalTransactionScanner 继承了 AbstractAutoProxyCreator,用于对 Spring AOP 支持,从代码中可看出,用GlobalTransactionalInterceptor 代替了被 GlobalTransactional 和 GlobalLock 注解的方法。

\n

GlobalTransactionalInterceptor 实现了 MethodInterceptor:

\n

io.seata.spring.annotation.GlobalTransactionalInterceptor#invoke

\n
public Object invoke(final MethodInvocation methodInvocation) throws Throwable {\n  Class<?> targetClass = methodInvocation.getThis() != null ? AopUtils.getTargetClass(methodInvocation.getThis()) : null;\n  Method specificMethod = ClassUtils.getMostSpecificMethod(methodInvocation.getMethod(), targetClass);\n  final Method method = BridgeMethodResolver.findBridgedMethod(specificMethod);\n\n  final GlobalTransactional globalTransactionalAnnotation = getAnnotation(method, GlobalTransactional.class);\n  final GlobalLock globalLockAnnotation = getAnnotation(method, GlobalLock.class);\n  if (globalTransactionalAnnotation != null) {\n    // 全局事务注解\n    return handleGlobalTransaction(methodInvocation, globalTransactionalAnnotation);\n  } else if (globalLockAnnotation != null) {\n    // 全局锁注解\n    return handleGlobalLock(methodInvocation);\n  } else {\n    return methodInvocation.proceed();\n  }\n}\n
\n

以上是代理方法执行的逻辑,其中 handleGlobalTransaction() 方法里面调用了 TransactionalTemplate 模版:

\n

io.seata.spring.annotation.GlobalTransactionalInterceptor#handleGlobalTransaction

\n
private Object handleGlobalTransaction(final MethodInvocation methodInvocation,\n                                       final GlobalTransactional globalTrxAnno) throws Throwable {\n  try {\n    return transactionalTemplate.execute(new TransactionalExecutor() {\n      @Override\n      public Object execute() throws Throwable {\n        return methodInvocation.proceed();\n      }\n      @Override\n      public TransactionInfo getTransactionInfo() {\n        // ...\n      }\n    });\n  } catch (TransactionalExecutor.ExecutionException e) {\n    // ...\n  }\n}\n
\n

handleGlobalTransaction() 方法执行的就是 TransactionalTemplate 模版类的 execute 方法:

\n

io.seata.tm.api.TransactionalTemplate#execute

\n
public Object execute(TransactionalExecutor business) throws Throwable {\n  // 1. get or create a transaction\n  GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\n\n  // 1.1 get transactionInfo\n  TransactionInfo txInfo = business.getTransactionInfo();\n  if (txInfo == null) {\n    throw new ShouldNeverHappenException(\"transactionInfo does not exist\");\n  }\n  try {\n\n    // 2. begin transaction\n    beginTransaction(txInfo, tx);\n\n    Object rs = null;\n    try {\n\n      // Do Your Business\n      rs = business.execute();\n\n    } catch (Throwable ex) {\n\n      // 3.the needed business exception to rollback.\n      completeTransactionAfterThrowing(txInfo,tx,ex);\n      throw ex;\n    }\n\n    // 4. everything is fine, commit.\n    commitTransaction(tx);\n\n    return rs;\n  } finally {\n    //5. clear\n    triggerAfterCompletion();\n    cleanUp();\n  }\n}\n
\n

以上是不是有一种似曾相识的感觉?没错,以上就是我们使用 API 时经常写的冗余代码,现在 Spring 通过代理模式,把这些冗余代码都封装到模版里面了,它将那些冗余代码统统封装起来统一流程处理,并不需要你显式写出来了,有兴趣的也可以去看看 Mybatis-spring 的源码,也是写得非常精彩。

\n

服务端处理逻辑

\n

服务端收到客户端的连接,那当然是将其 channel 也缓存起来,前面也说到客户端会发送 RegisterRMRequest/RegisterTMRequest 请求给服务端,服务端收到后会调用 ServerMessageListener 监听器处理:

\n

io.seata.core.rpc.ServerMessageListener

\n
public interface ServerMessageListener {\n  // 处理各种事务,如分支注册、分支提交、分支上报、分支回滚等等\n  void onTrxMessage(RpcMessage request, ChannelHandlerContext ctx, ServerMessageSender sender);\n\t// 处理 RM 客户端的注册连接\n  void onRegRmMessage(RpcMessage request, ChannelHandlerContext ctx,\n                      ServerMessageSender sender, RegisterCheckAuthHandler checkAuthHandler);\n  // 处理 TM 客户端的注册连接\n  void onRegTmMessage(RpcMessage request, ChannelHandlerContext ctx,\n                      ServerMessageSender sender, RegisterCheckAuthHandler checkAuthHandler);\n  // 服务端与客户端保持心跳\n  void onCheckMessage(RpcMessage request, ChannelHandlerContext ctx, ServerMessageSender sender)\n\n}\n
\n

ChannelManager 是服务端 channel 的管理器,服务端每次和客户端通信,都需要从 ChannelManager 中获取客户端对应的 channel,它用于保存 TM 和 RM 客户端 channel 的缓存结构如下:

\n
/**\n * resourceId -> applicationId -> ip -> port -> RpcContext\n */\nprivate static final ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer,\nRpcContext>>>>\n  RM_CHANNELS = new ConcurrentHashMap<String, ConcurrentMap<String, ConcurrentMap<String, ConcurrentMap<Integer,\nRpcContext>>>>();\n\n/**\n * ip+appname,port\n */\nprivate static final ConcurrentMap<String, ConcurrentMap<Integer, RpcContext>> TM_CHANNELS\n  = new ConcurrentHashMap<String, ConcurrentMap<Integer, RpcContext>>();\n
\n

以上的 Map 结构有点复杂:

\n

RM_CHANNELS:

\n
    \n
  1. resourceId 指的是 RM client 的数据库地址;
  2. \n
  3. applicationId 指的是 RM client 的服务 Id,比如 springboot 的配置 spring.application.name=account-service 中的 account-service 即是 applicationId;
  4. \n
  5. ip 指的是 RM client 服务地址;
  6. \n
  7. port 指的是 RM client 服务端口;
  8. \n
  9. RpcContext 保存了本次注册请求的信息。
  10. \n
\n

TM_CHANNELS:

\n
    \n
  1. ip+appname:这里的注释应该是写错了,应该是 appname+ip,即 TM_CHANNELS 的 Map 结构第一个 key 为 appname+ip;
  2. \n
  3. port:客户端的端口号。
  4. \n
\n

以下是 RM Client 注册逻辑:

\n

io.seata.core.rpc.ChannelManager#registerRMChannel

\n
public static void registerRMChannel(RegisterRMRequest resourceManagerRequest, Channel channel)\n  throws IncompatibleVersionException {\n  Version.checkVersion(resourceManagerRequest.getVersion());\n  // 将 ResourceIds 数据库连接连接信息放入一个set中\n  Set<String> dbkeySet = dbKeytoSet(resourceManagerRequest.getResourceIds());\n  RpcContext rpcContext;\n  // 从缓存中判断是否有该channel信息\n  if (!IDENTIFIED_CHANNELS.containsKey(channel)) {\n    // 根据请求注册信息,构建 rpcContext\n    rpcContext = buildChannelHolder(NettyPoolKey.TransactionRole.RMROLE, resourceManagerRequest.getVersion(),\n                                    resourceManagerRequest.getApplicationId(), resourceManagerRequest.getTransactionServiceGroup(),\n                                    resourceManagerRequest.getResourceIds(), channel);\n    // 将 rpcContext 放入缓存中\n    rpcContext.holdInIdentifiedChannels(IDENTIFIED_CHANNELS);\n  } else {\n    rpcContext = IDENTIFIED_CHANNELS.get(channel);\n    rpcContext.addResources(dbkeySet);\n  }\n  if (null == dbkeySet || dbkeySet.isEmpty()) { return; }\n  for (String resourceId : dbkeySet) {\n    String clientIp;\n    // 将请求信息存入 RM_CHANNELS 中,这里用了 java8 的 computeIfAbsent 方法操作\n    ConcurrentMap<Integer, RpcContext> portMap = RM_CHANNELS.computeIfAbsent(resourceId, resourceIdKey -> new ConcurrentHashMap<>())\n      .computeIfAbsent(resourceManagerRequest.getApplicationId(), applicationId -> new ConcurrentHashMap<>())\n      .computeIfAbsent(clientIp = getClientIpFromChannel(channel), clientIpKey -> new ConcurrentHashMap<>());\n\t\t// 将当前 rpcContext 放入 portMap 中\n    rpcContext.holdInResourceManagerChannels(resourceId, portMap);\n    updateChannelsResource(resourceId, clientIp, resourceManagerRequest.getApplicationId());\n  }\n}\n
\n

从以上代码逻辑能够看出,注册 RM client 主要是将注册请求信息,放入 RM_CHANNELS 缓存中,同时还会从 IDENTIFIED_CHANNELS 中判断本次请求的 channel 是否已验证过,IDENTIFIED_CHANNELS 的结构如下:

\n
private static final ConcurrentMap<Channel, RpcContext> IDENTIFIED_CHANNELS\n  = new ConcurrentHashMap<>();\n
\n

IDENTIFIED_CHANNELS 包含了所有 TM 和 RM 已注册的 channel。

\n

以下是 TM 注册逻辑:

\n

io.seata.core.rpc.ChannelManager#registerTMChannel

\n
public static void registerTMChannel(RegisterTMRequest request, Channel channel)\n  throws IncompatibleVersionException {\n  Version.checkVersion(request.getVersion());\n  // 根据请求注册信息,构建 RpcContext\n  RpcContext rpcContext = buildChannelHolder(NettyPoolKey.TransactionRole.TMROLE, request.getVersion(),\n                                             request.getApplicationId(),\n                                             request.getTransactionServiceGroup(),\n                                             null, channel);\n  // 将 RpcContext 放入 IDENTIFIED_CHANNELS 缓存中\n  rpcContext.holdInIdentifiedChannels(IDENTIFIED_CHANNELS);\n  // account-service:127.0.0.1:63353\n  String clientIdentified = rpcContext.getApplicationId() + Constants.CLIENT_ID_SPLIT_CHAR\n    + getClientIpFromChannel(channel);\n  // 将请求信息存入 TM_CHANNELS 缓存中\n  TM_CHANNELS.putIfAbsent(clientIdentified, new ConcurrentHashMap<Integer, RpcContext>());\n  // 将上一步创建好的get出来,之后再将rpcContext放入这个map的value中\n  ConcurrentMap<Integer, RpcContext> clientIdentifiedMap = TM_CHANNELS.get(clientIdentified);\n  rpcContext.holdInClientChannels(clientIdentifiedMap);\n}\n
\n

TM client 的注册大体类似,把本次注册的信息放入对应的缓存中保存,但比 RM client 的注册逻辑简单一些,主要是 RM client 会涉及分支事务资源的信息,需要注册的信息也会比 TM client 多。

\n

以上源码分析基于 0.9.0 版本。

\n

作者简介

\n

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

\n", - "link": "/zh-cn/blog/seata-at-mode-start.html", - "meta": { - "title": "Seata AT 模式启动源码分析", - "author": "张乘辉", - "keywords": "Seata、分布式事务、AT模式", - "description": "Seata 源码分析系列", - "date": "2019/11/27" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-at-tcc-saga.html b/zh-cn/blog/seata-at-tcc-saga.html deleted file mode 100644 index 3013ecbd..00000000 --- a/zh-cn/blog/seata-at-tcc-saga.html +++ /dev/null @@ -1,181 +0,0 @@ - - - - - - - - - - 分布式事务 Seata 及其三种模式详解 - - - - -

分布式事务 Seata 及其三种模式详解 | Meetup#3 回顾

-

作者:屹远(陈龙),蚂蚁金服分布式事务框架核心研发,Seata Committer。 -
本文根据 8 月 11 日 SOFA Meetup#3 广州站 《分布式事务 Seata 及其三种模式详解》主题分享整理,着重分享分布式事务产生的背景、理论基础,以及 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现。

-

现场回顾视频以及 PPT 见文末链接。

-

3 分布式事务 Seata 三种模式详解-屹远.jpg

-

-

一、分布式事务产生的背景

-

-

1.1 分布式架构演进之 - 数据库的水平拆分

-

蚂蚁金服的业务数据库起初是单库单表,但随着业务数据规模的快速发展,数据量越来越大,单库单表逐渐成为瓶颈。所以我们对数据库进行了水平拆分,将原单库单表拆分成数据库分片。

-

如下图所示,分库分表之后,原来在一个数据库上就能完成的写操作,可能就会跨多个数据库,这就产生了跨数据库事务问题。

-

image.png

-

-

1.2 分布式架构演进之 - 业务服务化拆分

-

在业务发展初期,“一块大饼”的单业务系统架构,能满足基本的业务需求。但是随着业务的快速发展,系统的访问量和业务复杂程度都在快速增长,单系统架构逐渐成为业务发展瓶颈,解决业务系统的高耦合、可伸缩问题的需求越来越强烈。

-

如下图所示,蚂蚁金服按照面向服务架构(SOA)的设计原则,将单业务系统拆分成多个业务系统,降低了各系统之间的耦合度,使不同的业务系统专注于自身业务,更有利于业务的发展和系统容量的伸缩。

-

image.png

-

业务系统按照服务拆分之后,一个完整的业务往往需要调用多个服务,如何保证多个服务间的数据一致性成为一个难题。

-

-

二、分布式事务理论基础

-

-

2.1 两阶段提交协议

-

16_16_18__08_13_2019.jpg

-

两阶段提交协议:事务管理器分两个阶段来协调资源管理器,第一阶段准备资源,也就是预留事务所需的资源,如果每个资源管理器都资源预留成功,则进行第二阶段资源提交,否则协调资源管理器回滚资源。

-

-

2.2 TCC

-

16_16_51__08_13_2019.jpg

-

TCC(Try-Confirm-Cancel) 实际上是服务化的两阶段提交协议,业务开发者需要实现这三个服务接口,第一阶段服务由业务代码编排来调用 Try 接口进行资源预留,所有参与者的 Try 接口都成功了,事务管理器会提交事务,并调用每个参与者的 Confirm 接口真正提交业务操作,否则调用每个参与者的 Cancel 接口回滚事务。

-

-

2.3 Saga

-

3 分布式事务 Seata 三种模式详解-屹远-9.jpg

-

Saga 是一种补偿协议,在 Saga 模式下,分布式事务内有多个参与者,每一个参与者都是一个冲正补偿服务,需要用户根据业务场景实现其正向操作和逆向回滚操作。

-

分布式事务执行过程中,依次执行各参与者的正向操作,如果所有正向操作均执行成功,那么分布式事务提交。如果任何一个正向操作执行失败,那么分布式事务会退回去执行前面各参与者的逆向回滚操作,回滚已提交的参与者,使分布式事务回到初始状态。

-

Saga 理论出自 Hector & Kenneth 1987发表的论文 Sagas。
-
Saga 正向服务与补偿服务也需要业务开发者实现。

-

-

三、Seata 及其三种模式详解

-

-

3.1 分布式事务 Seata 介绍

-

Seata(Simple Extensible Autonomous Transaction Architecture,简单可扩展自治事务框架)是 2019 年 1 月份蚂蚁金服和阿里巴巴共同开源的分布式事务解决方案。Seata 开源半年左右,目前已经有超过 1.1 万 star,社区非常活跃。我们热忱欢迎大家参与到 Seata 社区建设中,一同将 Seata 打造成开源分布式事务标杆产品。

-

Seata:https://github.com/seata/seata
-
image.png

-

-

3.2 分布式事务 Seata 产品模块

-

如下图所示,Seata 中有三大模块,分别是 TM、RM 和 TC。 其中 TM 和 RM 是作为 Seata 的客户端与业务系统集成在一起,TC 作为 Seata 的服务端独立部署。

-

image.png

-

在 Seata 中,分布式事务的执行流程:

-
    -
  • TM 开启分布式事务(TM 向 TC 注册全局事务记录);
  • -
  • 按业务场景,编排数据库、服务等事务内资源(RM 向 TC 汇报资源准备状态 );
  • -
  • TM 结束分布式事务,事务一阶段结束(TM 通知 TC 提交/回滚分布式事务);
  • -
  • TC 汇总事务信息,决定分布式事务是提交还是回滚;
  • -
  • TC 通知所有 RM 提交/回滚 资源,事务二阶段结束;
  • -
-

-

3.3 分布式事务 Seata 解决方案

-

Seata 会有 4 种分布式事务解决方案,分别是 AT 模式、TCC 模式、Saga 模式和 XA 模式。

-

15_49_23__08_13_2019.jpg

-

-

2.3.1 AT 模式

-

今年 1 月份,Seata 开源了 AT 模式。AT 模式是一种无侵入的分布式事务解决方案。在 AT 模式下,用户只需关注自己的“业务 SQL”,用户的 “业务 SQL” 作为一阶段,Seata 框架会自动生成事务的二阶段提交和回滚操作。

-

image.png

-

-
AT 模式如何做到对业务的无侵入 :
-
    -
  • 一阶段:
  • -
-

在一阶段,Seata 会拦截“业务 SQL”,首先解析 SQL 语义,找到“业务 SQL”要更新的业务数据,在业务数据被更新前,将其保存成“before image”,然后执行“业务 SQL”更新业务数据,在业务数据更新之后,再将其保存成“after image”,最后生成行锁。以上操作全部在一个数据库事务内完成,这样保证了一阶段操作的原子性。

-

图片3.png

-
    -
  • 二阶段提交:
  • -
-

二阶段如果是提交的话,因为“业务 SQL”在一阶段已经提交至数据库, 所以 Seata 框架只需将一阶段保存的快照数据和行锁删掉,完成数据清理即可。

-

图片4.png

-
    -
  • 二阶段回滚:
  • -
-

二阶段如果是回滚的话,Seata 就需要回滚一阶段已经执行的“业务 SQL”,还原业务数据。回滚方式便是用“before image”还原业务数据;但在还原前要首先要校验脏写,对比“数据库当前业务数据”和 “after image”,如果两份数据完全一致就说明没有脏写,可以还原业务数据,如果不一致就说明有脏写,出现脏写就需要转人工处理。

-

图片5.png

-

AT 模式的一阶段、二阶段提交和回滚均由 Seata 框架自动生成,用户只需编写“业务 SQL”,便能轻松接入分布式事务,AT 模式是一种对业务无任何侵入的分布式事务解决方案。

-

-

2.3.2 TCC 模式

-

2019 年 3 月份,Seata 开源了 TCC 模式,该模式由蚂蚁金服贡献。TCC 模式需要用户根据自己的业务场景实现 Try、Confirm 和 Cancel 三个操作;事务发起方在一阶段执行 Try 方式,在二阶段提交执行 Confirm 方法,二阶段回滚执行 Cancel 方法。

-

图片6.png

-

TCC 三个方法描述:

-
    -
  • Try:资源的检测和预留;
  • -
  • Confirm:执行的业务操作提交;要求 Try 成功 Confirm 一定要能成功;
  • -
  • Cancel:预留资源释放;
  • -
-

蚂蚁金服在 TCC 的实践经验
**
16_48_02__08_13_2019.jpg

-

1 TCC 设计 - 业务模型分 2 阶段设计:

-

用户接入 TCC ,最重要的是考虑如何将自己的业务模型拆成两阶段来实现。

-

以“扣钱”场景为例,在接入 TCC 前,对 A 账户的扣钱,只需一条更新账户余额的 SQL 便能完成;但是在接入 TCC 之后,用户就需要考虑如何将原来一步就能完成的扣钱操作,拆成两阶段,实现成三个方法,并且保证一阶段 Try  成功的话 二阶段 Confirm 一定能成功。

-

图片7.png

-

如上图所示,

-

Try 方法作为一阶段准备方法,需要做资源的检查和预留。在扣钱场景下,Try 要做的事情是就是检查账户余额是否充足,预留转账资金,预留的方式就是冻结 A 账户的 转账资金。Try 方法执行之后,账号 A 余额虽然还是 100,但是其中 30 元已经被冻结了,不能被其他事务使用。

-

二阶段 Confirm 方法执行真正的扣钱操作。Confirm 会使用 Try 阶段冻结的资金,执行账号扣款。Confirm 方法执行之后,账号 A 在一阶段中冻结的 30 元已经被扣除,账号 A 余额变成 70 元 。

-

如果二阶段是回滚的话,就需要在 Cancel 方法内释放一阶段 Try 冻结的 30 元,使账号 A 回到初始状态,100 元全部可用。
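下面给一段示意代码,用 Seata TCC 提供的 @TwoPhaseBusinessAction 注解把上述“扣钱”场景拆成 Try、Confirm、Cancel 三个方法(接口名、参数名仅作举例):

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.LocalTCC;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

@LocalTCC
public interface DeductTccAction {

    // 一阶段 Try:检查账户余额是否充足,并冻结待扣金额
    @TwoPhaseBusinessAction(name = "deductTccAction", commitMethod = "confirm", rollbackMethod = "cancel")
    boolean prepare(BusinessActionContext context,
                    @BusinessActionContextParameter(paramName = "accountId") String accountId,
                    @BusinessActionContextParameter(paramName = "amount") int amount);

    // 二阶段 Confirm:使用 Try 阶段冻结的金额执行真正的扣款
    boolean confirm(BusinessActionContext context);

    // 二阶段 Cancel:释放 Try 阶段冻结的金额
    boolean cancel(BusinessActionContext context);
}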

-

用户接入 TCC 模式,最重要的事情就是考虑如何将业务模型拆成 2 阶段,实现成 TCC 的 3 个方法,并且保证 Try 成功 Confirm 一定能成功。相对于 AT 模式,TCC 模式对业务代码有一定的侵入性,但是 TCC 模式无 AT 模式的全局行锁,TCC 性能会比 AT 模式高很多。

-

2 TCC 设计 - 允许空回滚:
**
16_51_44__08_13_2019.jpg

-

Cancel 接口设计时需要允许空回滚。当 Try 请求因为网络丢包而没有被收到时,事务管理器会触发回滚,这时会调用 Cancel 接口;Cancel 执行时如果发现没有对应的事务 xid 或业务主键,需要直接返回回滚成功,让事务管理器认为已经回滚,否则它会不断重试,而 Cancel 又没有对应的业务数据可以进行回滚。

-

3 TCC 设计 - 防悬挂控制:
**
16_51_56__08_13_2019.jpg

-

悬挂的意思是:Cancel 比 Try 接口先执行,出现的原因是 Try 由于网络拥堵而超时,事务管理器生成回滚,触发 Cancel 接口,而最终又收到了 Try 接口调用,但是 Cancel 比 Try 先到。按照前面允许空回滚的逻辑,回滚会返回成功,事务管理器认为事务已回滚成功,则此时的 Try 接口不应该执行,否则会产生数据不一致,所以我们在 Cancel 空回滚返回成功之前先记录该条事务 xid 或业务主键,标识这条记录已经回滚过,Try 接口先检查这条事务xid或业务主键如果已经标记为回滚成功过,则不执行 Try 的业务操作。

-

4 TCC 设计 - 幂等控制:
**
16_52_07__08_13_2019.jpg

-

幂等性的意思是:对同一个系统,使用同样的条件,一次请求和重复的多次请求对系统资源的影响是一致的。因为网络抖动或拥堵可能会超时,事务管理器会对资源进行重试操作,所以很可能一个业务操作会被重复调用,为了不因为重复调用而多次占用资源,需要对服务设计时进行幂等控制,通常我们可以用事务 xid 或业务主键判重来控制。
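把上面“空回滚、防悬挂、幂等”三条设计经验落到代码上,Cancel 侧大致可以写成下面的样子(仅为示意:TxRecordDao、AccountDao 等均为假设的业务组件,并非 Seata 提供的 API):

import io.seata.rm.tcc.api.BusinessActionContext;

public class DeductCancelSketch {

    // 假设的业务 DAO:记录每笔全局事务的 Try / 回滚状态
    interface TxRecordDao {
        boolean isRollbacked(String xid);
        Object findTryRecord(String xid);
        void markRollbacked(String xid);
    }

    // 假设的业务 DAO:账户资金操作
    interface AccountDao {
        void unfreeze(String xid);
    }

    private TxRecordDao txRecordDao;
    private AccountDao accountDao;

    public boolean cancel(BusinessActionContext context) {
        String xid = context.getXid();
        if (txRecordDao.isRollbacked(xid)) {           // 幂等:该 xid 已回滚过,直接返回成功
            return true;
        }
        if (txRecordDao.findTryRecord(xid) == null) {  // 空回滚:Try 未执行或请求丢包
            txRecordDao.markRollbacked(xid);           // 防悬挂:先落“已回滚”标记,拦截迟到的 Try
            return true;
        }
        accountDao.unfreeze(xid);                      // 正常回滚:释放 Try 冻结的资金
        txRecordDao.markRollbacked(xid);
        return true;
    }
}

对应地,Try 在执行业务操作之前,应先检查该事务 xid 是否已被标记为回滚,若已标记则不再执行业务操作。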

-

-

2.3.3 Saga 模式

-

Saga 模式是 Seata 即将开源的长事务解决方案,将由蚂蚁金服主要贡献。在 Saga 模式下,分布式事务内有多个参与者,每一个参与者都是一个冲正补偿服务,需要用户根据业务场景实现其正向操作和逆向回滚操作。

-

分布式事务执行过程中,依次执行各参与者的正向操作,如果所有正向操作均执行成功,那么分布式事务提交。如果任何一个正向操作执行失败,那么分布式事务会退回去执行前面各参与者的逆向回滚操作,回滚已提交的参与者,使分布式事务回到初始状态。

-

图片8.png

-

Saga 模式下分布式事务通常是由事件驱动的,各个参与者之间是异步执行的,Saga 模式是一种长事务解决方案。

-

1 Saga 模式使用场景
**
16_44_58__08_13_2019.jpg

-

Saga 模式适用于业务流程长且需要保证事务最终一致性的业务系统,Saga 模式一阶段就会提交本地事务,无锁、长流程情况下可以保证性能。

-

事务参与者可能是其它公司的服务或者是遗留系统的服务,无法进行改造和提供 TCC 要求的接口,可以使用 Saga 模式。

-

Saga模式的优势是:

-
    -
  • 一阶段提交本地数据库事务,无锁,高性能;
  • -
  • 参与者可以采用事务驱动异步执行,高吞吐;
  • -
  • 补偿服务即正向服务的“反向”,易于理解,易于实现;
  • -
-

缺点:Saga 模式由于一阶段已经提交本地数据库事务,且没有进行“预留”动作,所以不能保证隔离性。后续会讲到对于缺乏隔离性的应对措施。
2 基于状态机引擎的 Saga 实现

-

17_13_19__08_13_2019.jpg

-

目前 Saga 的实现一般有两种,一种是通过事件驱动架构实现,一种是基于注解加拦截器拦截业务的正向服务实现。Seata 目前是采用事件驱动的机制来实现的,Seata 实现了一个状态机,可以编排服务的调用流程及正向服务的补偿服务,生成一个 json 文件定义的状态图,状态机引擎驱动到这个图的运行,当发生异常的时候状态机触发回滚,逐个执行补偿服务。当然在什么情况下触发回滚用户是可以自定义决定的。该状态机可以实现服务编排的需求,它支持单项选择、并发、异步、子状态机调用、参数转换、参数映射、服务执行状态判断、异常捕获等功能。
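下面是一个极简的状态图 JSON 示意,只保留一个正向服务和它的补偿服务(字段名参考当时官方示例的写法,服务名、方法名仅作举例,具体以实际版本为准):

{
  "Name": "reduceInventory",
  "StartState": "ReduceInventory",
  "Version": "0.0.1",
  "States": {
    "ReduceInventory": {
      "Type": "ServiceTask",
      "ServiceName": "inventoryAction",
      "ServiceMethod": "reduce",
      "CompensateState": "CompensateReduceInventory",
      "Next": "Succeed"
    },
    "CompensateReduceInventory": {
      "Type": "ServiceTask",
      "ServiceName": "inventoryAction",
      "ServiceMethod": "compensateReduce"
    },
    "Succeed": {
      "Type": "Succeed"
    }
  }
}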

-

3 状态机引擎原理

-

16_45_32__08_13_2019.jpg

-

该状态机引擎的基本原理是:它基于事件驱动架构,每个步骤都是异步执行的,步骤与步骤之间通过事件队列流转,极大地提高了系统吞吐量。每个步骤执行时会记录事务日志,用于出现异常回滚时使用;事务日志会记录在业务表所在的数据库内,以提高性能。

-

4 状态机引擎设计

-

16_45_46__08_13_2019.jpg

-

该状态机引擎分成了三层架构的设计,最底层是“事件驱动”层,实现了 EventBus 和消费事件的线程池,是一个 Pub-Sub 的架构。第二层是“流程控制器”层,它实现了一个极简的流程引擎框架,它驱动一个“空”的流程执行,“空”的意思是指它不关心流程节点做什么事情,它只执行每个节点的 process 方法,然后执行 route 方法流转到下一个节点。这是一个通用框架,基于这两层,开发者可以实现任何流程引擎。最上层是“状态机引擎”层,它实现了每种状态节点的“行为”及“路由”逻辑代码,提供 API 和状态图仓库,同时还有一些其它组件,比如表达式语言、逻辑计算器、流水生成器、拦截器、配置管理、事务日志记录等。

-

5 Saga 服务设计经验

-

和 TCC 类似,Saga 的正向服务与反向服务也需要遵循以下设计原则:

-

1)Saga 服务设计 - 允许空补偿
**
16_52_22__08_13_2019.jpg

-

2)Saga 服务设计 - 防悬挂控制
**
16_52_52__08_13_2019.jpg

-

3)Saga 服务设计 - 幂等控制
**
3 分布式事务 Seata 三种模式详解-屹远-31.jpg

-

4)Saga 设计 - 自定义事务恢复策略
**
16_53_07__08_13_2019.jpg

-

前面讲到 Saga 模式不保证事务的隔离性,在极端情况下可能出现脏写。比如在分布式事务未提交的情况下,前一个服务的数据被修改了,而后面的服务发生了异常需要进行回滚,可能由于前面服务的数据被修改后无法进行补偿操作。这时的一种处理办法可以是“重试”继续往前完成这个分布式事务。由于整个业务流程是由状态机编排的,即使是事后恢复也可以继续往前重试。所以用户可以根据业务特点配置该流程的事务处理策略是优先“回滚”还是“重试”,当事务超时的时候,Server 端会根据这个策略不断进行重试。

-

由于 Saga 不保证隔离性,所以我们在业务设计的时候需要做到“宁可长款,不可短款”的原则,长款是指在出现差错的时候站在我方的角度钱多了的情况,钱少了则是短款,因为如果长款可以给客户退款,而短款则可能钱追不回来了,也就是说在业务设计的时候,一定是先扣客户帐再入帐,如果因为隔离性问题造成覆盖更新,也不会出现钱少了的情况。

-

6 基于注解和拦截器的 Saga 实现
**
17_13_37__08_13_2019.jpg

-

还有一种 Saga 的实现是基于注解+拦截器的实现,Seata 目前没有实现,可以看上面的伪代码来理解一下,one 方法上定义了 @SagaCompensable 的注解,用于定义 one 方法的补偿方法是 compensateOne 方法。然后在业务流程代码 processA 方法上定义 @SagaTransactional 注解,启动 Saga 分布式事务,通过拦截器拦截每个正向方法当出现异常的时候触发回滚操作,调用正向方法的补偿方法。
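按照上面的描述,这段伪代码大致可以还原成下面的样子(仅为示意:Seata 当时尚未实现该方式,注解属性名 compensationMethod 为假设):

class ServiceA {

    @SagaCompensable(compensationMethod = "compensateOne")
    public void one() {
        // 正向操作
    }

    public void compensateOne() {
        // one() 对应的补偿操作
    }
}

class ProcessA {

    private ServiceA serviceA;

    @SagaTransactional
    public void processA() {
        // 拦截器会拦截各正向方法,出现异常时反向调用对应的补偿方法
        serviceA.one();
    }
}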

-

7 两种 Saga 实现优劣对比

-

两种 Saga 的实现各有优缺点,下面的表格是一个对比:

-

17_13_49__08_13_2019.jpg

-

状态机引擎的最大优势是可以通过事件驱动的方式异步执行,提高系统吞吐,可以实现服务编排需求,在 Saga 模式缺乏隔离性的情况下,可以多一种“向前重试”的事务恢复策略。注解加拦截器的最大优势是开发简单、学习成本低。

-

-

总结

-

本文先回顾了分布式事务产生的背景及理论基础,然后重点讲解了 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现。

-

Seata 的定位是分布式事务全场景解决方案,未来还会有 XA 模式的分布式事务实现,每种模式都有它的适用场景。AT 模式是无侵入的分布式事务解决方案,适用于不希望对业务进行改造的场景,几乎 0 学习成本。TCC 模式是高性能分布式事务解决方案,适用于核心系统等对性能有很高要求的场景。Saga 模式是长事务解决方案,适用于业务流程长且需要保证事务最终一致性的业务系统,Saga 模式一阶段就会提交本地事务,无锁,长流程情况下可以保证性能,多用于渠道层、集成层业务系统。事务参与者可能是其它公司的服务或者是遗留系统的服务,无法进行改造和提供 TCC 要求的接口,也可以使用 Saga 模式。

-

本次分享的视频回顾以及PPT 查看地址:https://tech.antfin.com/community/activities/779/review

-
- - - - - - - diff --git a/zh-cn/blog/seata-at-tcc-saga.json b/zh-cn/blog/seata-at-tcc-saga.json deleted file mode 100644 index 07a7e19a..00000000 --- a/zh-cn/blog/seata-at-tcc-saga.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-at-tcc-saga.md", - "__html": "

分布式事务 Seata 及其三种模式详解 | Meetup#3 回顾

\n

作者:屹远(陈龙),蚂蚁金服分布式事务框架核心研发,Seata Committer。\n
本文根据 8 月 11 日 SOFA Meetup#3 广州站 《分布式事务 Seata 及其三种模式详解》主题分享整理,着重分享分布式事务产生的背景、理论基础,以及 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现。

\n

现场回顾视频以及 PPT 见文末链接。

\n

\"3

\n

\n

一、分布式事务产生的背景

\n

\n

1.1 分布式架构演进之 - 数据库的水平拆分

\n

蚂蚁金服的业务数据库起初是单库单表,但随着业务数据规模的快速发展,数据量越来越大,单库单表逐渐成为瓶颈。所以我们对数据库进行了水平拆分,将原单库单表拆分成数据库分片。

\n

如下图所示,分库分表之后,原来在一个数据库上就能完成的写操作,可能就会跨多个数据库,这就产生了跨数据库事务问题。

\n

\"image.png\"

\n

\n

1.2 分布式架构演进之 - 业务服务化拆分

\n

在业务发展初期,“一块大饼”的单业务系统架构,能满足基本的业务需求。但是随着业务的快速发展,系统的访问量和业务复杂程度都在快速增长,单系统架构逐渐成为业务发展瓶颈,解决业务系统的高耦合、可伸缩问题的需求越来越强烈。

\n

如下图所示,蚂蚁金服按照面向服务架构(SOA)的设计原则,将单业务系统拆分成多个业务系统,降低了各系统之间的耦合度,使不同的业务系统专注于自身业务,更有利于业务的发展和系统容量的伸缩。

\n

\"image.png\"

\n

业务系统按照服务拆分之后,一个完整的业务往往需要调用多个服务,如何保证多个服务间的数据一致性成为一个难题。

\n

\n

二、分布式事务理论基础

\n

\n

2.1 两阶段提交协议

\n

\"16_16_18__08_13_2019.jpg\"

\n

两阶段提交协议:事务管理器分两个阶段来协调资源管理器,第一阶段准备资源,也就是预留事务所需的资源,如果每个资源管理器都资源预留成功,则进行第二阶段资源提交,否则协调资源管理器回滚资源。

\n

\n

2.2 TCC

\n

\"16_16_51__08_13_2019.jpg\"

\n

TCC(Try-Confirm-Cancel) 实际上是服务化的两阶段提交协议,业务开发者需要实现这三个服务接口,第一阶段服务由业务代码编排来调用 Try 接口进行资源预留,所有参与者的 Try 接口都成功了,事务管理器会提交事务,并调用每个参与者的 Confirm 接口真正提交业务操作,否则调用每个参与者的 Cancel 接口回滚事务。

\n

\n

2.3 Saga

\n

\"3

\n

Saga 是一种补偿协议,在 Saga 模式下,分布式事务内有多个参与者,每一个参与者都是一个冲正补偿服务,需要用户根据业务场景实现其正向操作和逆向回滚操作。

\n

分布式事务执行过程中,依次执行各参与者的正向操作,如果所有正向操作均执行成功,那么分布式事务提交。如果任何一个正向操作执行失败,那么分布式事务会退回去执行前面各参与者的逆向回滚操作,回滚已提交的参与者,使分布式事务回到初始状态。

\n

Saga 理论出自 Hector & Kenneth 1987发表的论文 Sagas。
\n
Saga 正向服务与补偿服务也需要业务开发者实现。

\n

\n

三、Seata 及其三种模式详解

\n

\n

3.1 分布式事务 Seata 介绍

\n

Seata(Simple Extensible Autonomous Transaction Architecture,简单可扩展自治事务框架)是 2019 年 1 月份蚂蚁金服和阿里巴巴共同开源的分布式事务解决方案。Seata 开源半年左右,目前已经有超过 1.1 万 star,社区非常活跃。我们热忱欢迎大家参与到 Seata 社区建设中,一同将 Seata 打造成开源分布式事务标杆产品。

\n

Seata:https://github.com/seata/seata
\n
\"image.png\"

\n

\n

3.2 分布式事务 Seata 产品模块

\n

如下图所示,Seata 中有三大模块,分别是 TM、RM 和 TC。 其中 TM 和 RM 是作为 Seata 的客户端与业务系统集成在一起,TC 作为 Seata 的服务端独立部署。

\n

\"image.png\"

\n

在 Seata 中,分布式事务的执行流程:

\n
    \n
  • TM 开启分布式事务(TM 向 TC 注册全局事务记录);
  • \n
  • 按业务场景,编排数据库、服务等事务内资源(RM 向 TC 汇报资源准备状态 );
  • \n
  • TM 结束分布式事务,事务一阶段结束(TM 通知 TC 提交/回滚分布式事务);
  • \n
  • TC 汇总事务信息,决定分布式事务是提交还是回滚;
  • \n
  • TC 通知所有 RM 提交/回滚 资源,事务二阶段结束;
  • \n
\n

\n

3.3 分布式事务 Seata 解决方案

\n

Seata 会有 4 种分布式事务解决方案,分别是 AT 模式、TCC 模式、Saga 模式和 XA 模式。

\n

\"15_49_23__08_13_2019.jpg\"

\n

\n

2.3.1 AT 模式

\n

今年 1 月份,Seata 开源了 AT 模式。AT 模式是一种无侵入的分布式事务解决方案。在 AT 模式下,用户只需关注自己的“业务 SQL”,用户的 “业务 SQL” 作为一阶段,Seata 框架会自动生成事务的二阶段提交和回滚操作。

\n

\"image.png\"

\n

\n
AT 模式如何做到对业务的无侵入 :
\n
    \n
  • 一阶段:
  • \n
\n

在一阶段,Seata 会拦截“业务 SQL”,首先解析 SQL 语义,找到“业务 SQL”要更新的业务数据,在业务数据被更新前,将其保存成“before image”,然后执行“业务 SQL”更新业务数据,在业务数据更新之后,再将其保存成“after image”,最后生成行锁。以上操作全部在一个数据库事务内完成,这样保证了一阶段操作的原子性。

\n

\"图片3.png\"

\n
    \n
  • 二阶段提交:
  • \n
\n

二阶段如果是提交的话,因为“业务 SQL”在一阶段已经提交至数据库, 所以 Seata 框架只需将一阶段保存的快照数据和行锁删掉,完成数据清理即可。

\n

\"图片4.png\"

\n
    \n
  • 二阶段回滚:
  • \n
\n

二阶段如果是回滚的话,Seata 就需要回滚一阶段已经执行的“业务 SQL”,还原业务数据。回滚方式便是用“before image”还原业务数据;但在还原前要首先要校验脏写,对比“数据库当前业务数据”和 “after image”,如果两份数据完全一致就说明没有脏写,可以还原业务数据,如果不一致就说明有脏写,出现脏写就需要转人工处理。

\n

\"图片5.png\"

\n

AT 模式的一阶段、二阶段提交和回滚均由 Seata 框架自动生成,用户只需编写“业务 SQL”,便能轻松接入分布式事务,AT 模式是一种对业务无任何侵入的分布式事务解决方案。

\n

\n

2.3.2 TCC 模式

\n

2019 年 3 月份,Seata 开源了 TCC 模式,该模式由蚂蚁金服贡献。TCC 模式需要用户根据自己的业务场景实现 Try、Confirm 和 Cancel 三个操作;事务发起方在一阶段执行 Try 方式,在二阶段提交执行 Confirm 方法,二阶段回滚执行 Cancel 方法。

\n

\"图片6.png\"

\n

TCC 三个方法描述:

\n
    \n
  • Try:资源的检测和预留;
  • \n
  • Confirm:执行的业务操作提交;要求 Try 成功 Confirm 一定要能成功;
  • \n
  • Cancel:预留资源释放;
  • \n
\n

蚂蚁金服在 TCC 的实践经验
**
\"16_48_02__08_13_2019.jpg\"

\n

1 TCC 设计 - 业务模型分 2 阶段设计:

\n

用户接入 TCC ,最重要的是考虑如何将自己的业务模型拆成两阶段来实现。

\n

以“扣钱”场景为例,在接入 TCC 前,对 A 账户的扣钱,只需一条更新账户余额的 SQL 便能完成;但是在接入 TCC 之后,用户就需要考虑如何将原来一步就能完成的扣钱操作,拆成两阶段,实现成三个方法,并且保证一阶段 Try  成功的话 二阶段 Confirm 一定能成功。

\n

\"图片7.png\"

\n

如上图所示,

\n

Try 方法作为一阶段准备方法,需要做资源的检查和预留。在扣钱场景下,Try 要做的事情是就是检查账户余额是否充足,预留转账资金,预留的方式就是冻结 A 账户的 转账资金。Try 方法执行之后,账号 A 余额虽然还是 100,但是其中 30 元已经被冻结了,不能被其他事务使用。

\n

二阶段 Confirm 方法执行真正的扣钱操作。Confirm 会使用 Try 阶段冻结的资金,执行账号扣款。Confirm 方法执行之后,账号 A 在一阶段中冻结的 30 元已经被扣除,账号 A 余额变成 70 元 。

\n

如果二阶段是回滚的话,就需要在 Cancel 方法内释放一阶段 Try 冻结的 30 元,使账号 A 回到初始状态,100 元全部可用。

\n

用户接入 TCC 模式,最重要的事情就是考虑如何将业务模型拆成 2 阶段,实现成 TCC 的 3 个方法,并且保证 Try 成功 Confirm 一定能成功。相对于 AT 模式,TCC 模式对业务代码有一定的侵入性,但是 TCC 模式无 AT 模式的全局行锁,TCC 性能会比 AT 模式高很多。

\n

2 TCC 设计 - 允许空回滚:
**
\"16_51_44__08_13_2019.jpg\"

\n

Cancel 接口设计时需要允许空回滚。当 Try 请求因为网络丢包而没有被收到时,事务管理器会触发回滚,这时会调用 Cancel 接口;Cancel 执行时如果发现没有对应的事务 xid 或业务主键,需要直接返回回滚成功,让事务管理器认为已经回滚,否则它会不断重试,而 Cancel 又没有对应的业务数据可以进行回滚。

\n

3 TCC 设计 - 防悬挂控制:
**
\"16_51_56__08_13_2019.jpg\"

\n

悬挂的意思是:Cancel 比 Try 接口先执行,出现的原因是 Try 由于网络拥堵而超时,事务管理器生成回滚,触发 Cancel 接口,而最终又收到了 Try 接口调用,但是 Cancel 比 Try 先到。按照前面允许空回滚的逻辑,回滚会返回成功,事务管理器认为事务已回滚成功,则此时的 Try 接口不应该执行,否则会产生数据不一致,所以我们在 Cancel 空回滚返回成功之前先记录该条事务 xid 或业务主键,标识这条记录已经回滚过,Try 接口先检查这条事务xid或业务主键如果已经标记为回滚成功过,则不执行 Try 的业务操作。

\n

4 TCC 设计 - 幂等控制:
**
\"16_52_07__08_13_2019.jpg\"

\n

幂等性的意思是:对同一个系统,使用同样的条件,一次请求和重复的多次请求对系统资源的影响是一致的。因为网络抖动或拥堵可能会超时,事务管理器会对资源进行重试操作,所以很可能一个业务操作会被重复调用,为了不因为重复调用而多次占用资源,需要对服务设计时进行幂等控制,通常我们可以用事务 xid 或业务主键判重来控制。

\n

\n

2.3.3 Saga 模式

\n

Saga 模式是 Seata 即将开源的长事务解决方案,将由蚂蚁金服主要贡献。在 Saga 模式下,分布式事务内有多个参与者,每一个参与者都是一个冲正补偿服务,需要用户根据业务场景实现其正向操作和逆向回滚操作。

\n

分布式事务执行过程中,依次执行各参与者的正向操作,如果所有正向操作均执行成功,那么分布式事务提交。如果任何一个正向操作执行失败,那么分布式事务会退回去执行前面各参与者的逆向回滚操作,回滚已提交的参与者,使分布式事务回到初始状态。

\n

\"图片8.png\"

\n

Saga 模式下分布式事务通常是由事件驱动的,各个参与者之间是异步执行的,Saga 模式是一种长事务解决方案。

\n

1 Saga 模式使用场景
**
\"16_44_58__08_13_2019.jpg\"

\n

Saga 模式适用于业务流程长且需要保证事务最终一致性的业务系统,Saga 模式一阶段就会提交本地事务,无锁、长流程情况下可以保证性能。

\n

事务参与者可能是其它公司的服务或者是遗留系统的服务,无法进行改造和提供 TCC 要求的接口,可以使用 Saga 模式。

\n

Saga模式的优势是:

\n
    \n
  • 一阶段提交本地数据库事务,无锁,高性能;
  • \n
  • 参与者可以采用事务驱动异步执行,高吞吐;
  • \n
  • 补偿服务即正向服务的“反向”,易于理解,易于实现;
  • \n
\n

缺点:Saga 模式由于一阶段已经提交本地数据库事务,且没有进行“预留”动作,所以不能保证隔离性。后续会讲到对于缺乏隔离性的应对措施。
2 基于状态机引擎的 Saga 实现

\n

\"17_13_19__08_13_2019.jpg\"

\n

目前 Saga 的实现一般有两种,一种是通过事件驱动架构实现,一种是基于注解加拦截器拦截业务的正向服务实现。Seata 目前是采用事件驱动的机制来实现的,Seata 实现了一个状态机,可以编排服务的调用流程及正向服务的补偿服务,生成一个 json 文件定义的状态图,状态机引擎驱动到这个图的运行,当发生异常的时候状态机触发回滚,逐个执行补偿服务。当然在什么情况下触发回滚用户是可以自定义决定的。该状态机可以实现服务编排的需求,它支持单项选择、并发、异步、子状态机调用、参数转换、参数映射、服务执行状态判断、异常捕获等功能。

\n

3 状态机引擎原理

\n

\"16_45_32__08_13_2019.jpg\"

\n

该状态机引擎的基本原理是:它基于事件驱动架构,每个步骤都是异步执行的,步骤与步骤之间通过事件队列流转,极大地提高了系统吞吐量。每个步骤执行时会记录事务日志,用于出现异常回滚时使用;事务日志会记录在业务表所在的数据库内,以提高性能。

\n

4 状态机引擎设计

\n

\"16_45_46__08_13_2019.jpg\"

\n

该状态机引擎分成了三层架构的设计,最底层是“事件驱动”层,实现了 EventBus 和消费事件的线程池,是一个 Pub-Sub 的架构。第二层是“流程控制器”层,它实现了一个极简的流程引擎框架,它驱动一个“空”的流程执行,“空”的意思是指它不关心流程节点做什么事情,它只执行每个节点的 process 方法,然后执行 route 方法流转到下一个节点。这是一个通用框架,基于这两层,开发者可以实现任何流程引擎。最上层是“状态机引擎”层,它实现了每种状态节点的“行为”及“路由”逻辑代码,提供 API 和状态图仓库,同时还有一些其它组件,比如表达式语言、逻辑计算器、流水生成器、拦截器、配置管理、事务日志记录等。

\n

5 Saga 服务设计经验

\n

和 TCC 类似,Saga 的正向服务与反向服务也需要遵循以下设计原则:

\n

1)Saga 服务设计 - 允许空补偿
**
\"16_52_22__08_13_2019.jpg\"

\n

2)Saga 服务设计 - 防悬挂控制
**
\"16_52_52__08_13_2019.jpg\"

\n

3)Saga 服务设计 - 幂等控制
**
\"3

\n

4)Saga 设计 - 自定义事务恢复策略
**
\"16_53_07__08_13_2019.jpg\"

\n

前面讲到 Saga 模式不保证事务的隔离性,在极端情况下可能出现脏写。比如在分布式事务未提交的情况下,前一个服务的数据被修改了,而后面的服务发生了异常需要进行回滚,可能由于前面服务的数据被修改后无法进行补偿操作。这时的一种处理办法可以是“重试”继续往前完成这个分布式事务。由于整个业务流程是由状态机编排的,即使是事后恢复也可以继续往前重试。所以用户可以根据业务特点配置该流程的事务处理策略是优先“回滚”还是“重试”,当事务超时的时候,Server 端会根据这个策略不断进行重试。

\n

由于 Saga 不保证隔离性,所以我们在业务设计的时候需要做到“宁可长款,不可短款”的原则,长款是指在出现差错的时候站在我方的角度钱多了的情况,钱少了则是短款,因为如果长款可以给客户退款,而短款则可能钱追不回来了,也就是说在业务设计的时候,一定是先扣客户帐再入帐,如果因为隔离性问题造成覆盖更新,也不会出现钱少了的情况。

\n

6 基于注解和拦截器的 Saga 实现
**
\"17_13_37__08_13_2019.jpg\"

\n

还有一种 Saga 的实现是基于注解+拦截器的实现,Seata 目前没有实现,可以看上面的伪代码来理解一下,one 方法上定义了 @SagaCompensable 的注解,用于定义 one 方法的补偿方法是 compensateOne 方法。然后在业务流程代码 processA 方法上定义 @SagaTransactional 注解,启动 Saga 分布式事务,通过拦截器拦截每个正向方法当出现异常的时候触发回滚操作,调用正向方法的补偿方法。

\n

7 两种 Saga 实现优劣对比

\n

两种 Saga 的实现各有优缺点,下面的表格是一个对比:

\n

\"17_13_49__08_13_2019.jpg\"

\n

状态机引擎的最大优势是可以通过事件驱动的方式异步执行,提高系统吞吐,可以实现服务编排需求,在 Saga 模式缺乏隔离性的情况下,可以多一种“向前重试”的事务恢复策略。注解加拦截器的最大优势是开发简单、学习成本低。

\n

\n

总结

\n

本文先回顾了分布式事务产生的背景及理论基础,然后重点讲解了 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现。

\n

Seata 的定位是分布式事务全场景解决方案,未来还会有 XA 模式的分布式事务实现,每种模式都有它的适用场景。AT 模式是无侵入的分布式事务解决方案,适用于不希望对业务进行改造的场景,几乎 0 学习成本。TCC 模式是高性能分布式事务解决方案,适用于核心系统等对性能有很高要求的场景。Saga 模式是长事务解决方案,适用于业务流程长且需要保证事务最终一致性的业务系统,Saga 模式一阶段就会提交本地事务,无锁,长流程情况下可以保证性能,多用于渠道层、集成层业务系统。事务参与者可能是其它公司的服务或者是遗留系统的服务,无法进行改造和提供 TCC 要求的接口,也可以使用 Saga 模式。

\n

本次分享的视频回顾以及PPT 查看地址:https://tech.antfin.com/community/activities/779/review

\n", - "link": "/zh-cn/blog/seata-at-tcc-saga.html", - "meta": { - "title": "分布式事务 Seata 及其三种模式详解", - "keywords": "Saga,Seata,AT,TCC,一致性,金融,分布式,事务", - "description": "着重分享分布式事务产生的背景、理论基础,以及 Seata 分布式事务的原理以及三种模式(AT、TCC、Saga)的分布式事务实现", - "author": "long187", - "date": "2019-08-11" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-config-center.html b/zh-cn/blog/seata-config-center.html deleted file mode 100644 index 000fd7e3..00000000 --- a/zh-cn/blog/seata-config-center.html +++ /dev/null @@ -1,128 +0,0 @@ - - - - - - - - - - Seata 配置中心实现原理 - - - - -

前言

-

Seata 可以支持多个第三方配置中心,那么 Seata 是如何同时兼容那么多个配置中心的呢?下面我给大家详细介绍下 Seata 配置中心的实现原理。

-

配置中心属性加载

-

在 Seata 配置中心,有两个默认的配置文件:

-

-

file.conf 是默认的配置属性,registry.conf 主要存储第三方注册中心与配置中心的信息,主要有两大块:

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  # ...
-}
-
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "file"
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-  }
-  file {
-    name = "file.conf"
-  }
-  # ...
-}
-
-

其中 registry 为注册中心的配置属性,这里先不讲,config 为配置中心的属性值,默认为 file 类型,即会加载本地的 file.conf 里面的属性,如果 type 为其它类型,那么会从第三方配置中心加载配置属性值。

-

在 config 模块的 core 目录中,有个配置工厂类 ConfigurationFactory,它的结构如下:

-

-

可以看到都是一些配置的静态常量:

-

REGISTRY_CONF_PREFIX、REGISTRY_CONF_SUFFIX:配置文件名、默认配置文件类型;

-

SYSTEM_PROPERTY_SEATA_CONFIG_NAME、ENV_SEATA_CONFIG_NAME、ENV_SYSTEM_KEY、ENV_PROPERTY_KEY:自定义文件名配置变量,也说明我们可以自定义配置中心的属性文件。

-

ConfigurationFactory 里面有一处静态代码块,如下:

-

io.seata.config.ConfigurationFactory

-

-

根据自定义文件名配置变量找出配置文件名称与类型,如果没有配置,默认使用 registry.conf;FileConfiguration 是 Seata 默认的配置实现类,如果为默认值,则会根据 registry.conf 配置文件生成 FileConfiguration 默认配置对象。这里也可以利用 SPI 机制支持第三方扩展配置实现,具体做法是实现 ExtConfigurationProvider 接口,并在 META-INF/services/ 目录下创建一个文件,填写实现类的全路径名,如下所示:
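例如(文件名即所实现接口的全限定名,包名以实际源码为准;实现类的全路径名此处仅为举例):

文件:META-INF/services/io.seata.config.ExtConfigurationProvider
内容:com.example.config.MyExtConfigurationProvider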

-

-

第三方配置中心实现类加载

-

在静态代码块逻辑加载完配置中心属性之后,Seata 是如何选择配置中心并获取配置中心的属性值的呢?

-

我们刚刚也说了 FileConfiguration 是 Seata 的默认配置实现类,它继承了 AbstractConfiguration,它的基类为 Configuration,提供了获取参数值的方法:

-
short getShort(String dataId, int defaultValue, long timeoutMills);
-int getInt(String dataId, int defaultValue, long timeoutMills);
-long getLong(String dataId, long defaultValue, long timeoutMills);
-// ....
-
-

那么意味着只需要第三方配置中心实现该接口,就可以整合到 Seata 配置中心了,下面我拿 zk 来做例子:

-

首先,第三方配置中心需要实现一个 Provider 类:

-

-

实现的 provider 方法如其名,主要是输出具体的 Configuration 实现类。
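以 zk 为例,这个 Provider 类大致如下(示意写法,@LoadLevel 注解的作用下文会讲到):

@LoadLevel(name = "ZK", order = 1)
public class ZookeeperConfigurationProvider implements ConfigurationProvider {

    @Override
    public Configuration provide() {
        // 输出具体的 Configuration 实现类
        return new ZookeeperConfiguration();
    }
}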

-

那么我们是如何获取根据配置去获取对应的第三方配置中心实现类呢?

-

在 Seata 项目中,获取一个第三方配置中心实现类通常是这么做的:

-
Configuration CONFIG = ConfigurationFactory.getInstance();
-
-

在 getInstance() 方法中主要是使用了单例模式构造配置实现类,它的构造具体实现如下:

-

io.seata.config.ConfigurationFactory#buildConfiguration:

-

-

首先从 ConfigurationFactory 中的静态代码块根据 registry.conf 创建的 CURRENT_FILE_INSTANCE 中获取当前环境使用的配置中心,默认为 File 类型;我们也可以在 registry.conf 中配置其它第三方配置中心,这里也是利用了 SPI 机制去加载第三方配置中心的实现类,具体实现如下:

-

-

如上,即是刚刚我所说的 ZookeeperConfigurationProvider 配置实现输出类,我们再来看看这行代码:

-
EnhancedServiceLoader.load(ConfigurationProvider.class,Objects.requireNonNull(configType).name()).provide();
-
-

EnhancedServiceLoader 是 Seata SPI 实现的核心类,这行代码会加载 META-INF/services/ 与 META-INF/seata/ 目录中文件里填写的类名,那么如果其中有多个配置中心实现类都被加载了怎么办呢?

-

我们注意到 ZookeeperConfigurationProvider 类的上面有一个注解:

-
@LoadLevel(name = "ZK", order = 1)
-
-

在加载多个配置中心实现类时,会根据 order 进行排序:

-

io.seata.common.loader.EnhancedServiceLoader#findAllExtensionClass:

-

-

io.seata.common.loader.EnhancedServiceLoader#loadFile:

-

-

这样,就不会产生冲突了。

-

但是我们发现 Seata 还可以用这个方法进行选择,Seata 在调用 load 方法时,还传了一个参数:

-
Objects.requireNonNull(configType).name()
-
-

ConfigType 为配置中心类型,是个枚举类:

-
public enum ConfigType {
-  File, ZK, Nacos, Apollo, Consul, Etcd3, SpringCloudConfig, Custom;
-}
-
-

我们注意到,LoadLevel 注解上还有一个 name 属性,在进行筛选实现类时,Seata 还做了这个操作:

-

-

根据当前 configType 来判断是否等于 LoadLevel 的 name 属性,如果相等,那么就是当前配置的第三方配置中心实现类。
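这一步的筛选逻辑大致等价于下面的示意代码(并非 Seata 源码原文,仅表达“按 LoadLevel 的 name 匹配 configType”这件事):

static Class<?> selectByConfigType(List<Class<?>> allExtensionClasses, ConfigType configType) {
    String activateName = Objects.requireNonNull(configType).name();
    for (Class<?> clazz : allExtensionClasses) {
        LoadLevel loadLevel = clazz.getAnnotation(LoadLevel.class);
        if (loadLevel != null && activateName.equalsIgnoreCase(loadLevel.name())) {
            return clazz; // 即当前配置的第三方配置中心实现类
        }
    }
    return null;
}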

-

第三方配置中心实现类

-

ZookeeperConfiguration 继承了 AbstractConfiguration,它的构造方法如下:

-

-

构造方法创建了一个 zkClient 对象,这里的 FILE_CONFIG 是什么呢?

-
private static final Configuration FILE_CONFIG = ConfigurationFactory.CURRENT_FILE_INSTANCE;
-
-

原来就是刚刚静态代码块中创建的 registry.conf 配置实现类,从该配置实现类拿到第三方配置中心的相关属性,构造第三方配置中心客户端,然后实现 Configuration 接口时:

-

-

就可以利用客户端相关方法去第三方配置获取对应的参数值了。

-

第三方配置中心配置同步脚本

-

上周末才写好,已经提交 PR 上去了,还处于 review 中,预估会在 Seata 1.0 版本提供给大家使用,敬请期待。

-

具体位置在 Seata 项目的 script 目录中:

-

-

config.txt 为本地配置好的值,搭建好第三方配置中心之后,运行脚本会将 config.txt 的配置同步到第三方配置中心。

-

作者简介

-

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

-
- - - - - - - diff --git a/zh-cn/blog/seata-config-center.json b/zh-cn/blog/seata-config-center.json deleted file mode 100644 index 7f519309..00000000 --- a/zh-cn/blog/seata-config-center.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-config-center.md", - "__html": "

前言

\n

Seata 可以支持多个第三方配置中心,那么 Seata 是如何同时兼容那么多个配置中心的呢?下面我给大家详细介绍下 Seata 配置中心的实现原理。

\n

配置中心属性加载

\n

在 Seata 配置中心,有两个默认的配置文件:

\n

\"\"

\n

file.conf 是默认的配置属性,registry.conf 主要存储第三方注册中心与配置中心的信息,主要有两大块:

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  # ...\n}\n\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = \"file\"\n  nacos {\n    serverAddr = \"localhost\"\n    namespace = \"\"\n  }\n  file {\n    name = \"file.conf\"\n  }\n  # ...\n}\n
\n

其中 registry 为注册中心的配置属性,这里先不讲,config 为配置中心的属性值,默认为 file 类型,即会加载本地的 file.conf 里面的属性,如果 type 为其它类型,那么会从第三方配置中心加载配置属性值。

\n

在 config 模块的 core 目录中,有个配置工厂类 ConfigurationFactory,它的结构如下:

\n

\"\"

\n

可以看到都是一些配置的静态常量:

\n

REGISTRY_CONF_PREFIX、REGISTRY_CONF_SUFFIX:配置文件名、默认配置文件类型;

\n

SYSTEM_PROPERTY_SEATA_CONFIG_NAME、ENV_SEATA_CONFIG_NAME、ENV_SYSTEM_KEY、ENV_PROPERTY_KEY:自定义文件名配置变量,也说明我们可以自定义配置中心的属性文件。

\n

ConfigurationFactory 里面有一处静态代码块,如下:

\n

io.seata.config.ConfigurationFactory

\n

\"\"

\n

根据自定义文件名配置变量找出配置文件名称与类型,如果没有配置,默认使用 registry.conf;FileConfiguration 是 Seata 默认的配置实现类,如果为默认值,则会根据 registry.conf 配置文件生成 FileConfiguration 默认配置对象。这里也可以利用 SPI 机制支持第三方扩展配置实现,具体做法是实现 ExtConfigurationProvider 接口,并在 META-INF/services/ 目录下创建一个文件,填写实现类的全路径名,如下所示:

\n

\"\"

\n

第三方配置中心实现类加载

\n

在静态代码块逻辑加载完配置中心属性之后,Seata 是如何选择配置中心并获取配置中心的属性值的呢?

\n

我们刚刚也说了 FileConfiguration 是 Seata 的默认配置实现类,它继承了 AbstractConfiguration,它的基类为 Configuration,提供了获取参数值的方法:

\n
short getShort(String dataId, int defaultValue, long timeoutMills);\nint getInt(String dataId, int defaultValue, long timeoutMills);\nlong getLong(String dataId, long defaultValue, long timeoutMills);\n// ....\n
\n

那么意味着只需要第三方配置中心实现该接口,就可以整合到 Seata 配置中心了,下面我拿 zk 来做例子:

\n

首先,第三方配置中心需要实现一个 Provider 类:

\n

\"\"

\n

实现的 provider 方法如其名,主要是输出具体的 Configuration 实现类。

\n

那么我们是如何获取根据配置去获取对应的第三方配置中心实现类呢?

\n

在 Seata 项目中,获取一个第三方配置中心实现类通常是这么做的:

\n
Configuration CONFIG = ConfigurationFactory.getInstance();\n
\n

在 getInstance() 方法中主要是使用了单例模式构造配置实现类,它的构造具体实现如下:

\n

io.seata.config.ConfigurationFactory#buildConfiguration:

\n

\"\"

\n

首先从 ConfigurationFactory 中的静态代码块根据 registry.conf 创建的 CURRENT_FILE_INSTANCE 中获取当前环境使用的配置中心,默认为 File 类型;我们也可以在 registry.conf 中配置其它第三方配置中心,这里也是利用了 SPI 机制去加载第三方配置中心的实现类,具体实现如下:

\n

\"\"

\n

如上,即是刚刚我所说的 ZookeeperConfigurationProvider 配置实现输出类,我们再来看看这行代码:

\n
EnhancedServiceLoader.load(ConfigurationProvider.class,Objects.requireNonNull(configType).name()).provide();\n
\n

EnhancedServiceLoader 是 Seata SPI 实现的核心类,这行代码会加载 META-INF/services/ 与 META-INF/seata/ 目录中文件里填写的类名,那么如果其中有多个配置中心实现类都被加载了怎么办呢?

\n

我们注意到 ZookeeperConfigurationProvider 类的上面有一个注解:

\n
@LoadLevel(name = \"ZK\", order = 1)\n
\n

在加载多个配置中心实现类时,会根据 order 进行排序:

\n

io.seata.common.loader.EnhancedServiceLoader#findAllExtensionClass:

\n

\"\"

\n

io.seata.common.loader.EnhancedServiceLoader#loadFile:

\n

\"\"

\n

这样,就不会产生冲突了。

\n

但是我们发现 Seata 还可以用这个方法进行选择,Seata 在调用 load 方法时,还传了一个参数:

\n
Objects.requireNonNull(configType).name()\n
\n

ConfigType 为配置中心类型,是个枚举类:

\n
public enum ConfigType {\n  File, ZK, Nacos, Apollo, Consul, Etcd3, SpringCloudConfig, Custom;\n}\n
\n

我们注意到,LoadLevel 注解上还有一个 name 属性,在进行筛选实现类时,Seata 还做了这个操作:

\n

\"\"

\n

根据当前 configType 来判断是否等于 LoadLevel 的 name 属性,如果相等,那么就是当前配置的第三方配置中心实现类。

\n

第三方配置中心实现类

\n

ZookeeperConfiguration 继承了 AbstractConfiguration,它的构造方法如下:

\n

\"\"

\n

构造方法创建了一个 zkClient 对象,这里的 FILE_CONFIG 是什么呢?

\n
private static final Configuration FILE_CONFIG = ConfigurationFactory.CURRENT_FILE_INSTANCE;\n
\n

原来就是刚刚静态代码块中创建的 registry.conf 配置实现类,从该配置实现类拿到第三方配置中心的相关属性,构造第三方配置中心客户端,然后实现 Configuration 接口时:

\n

\"\"

\n

就可以利用客户端相关方法去第三方配置获取对应的参数值了。

\n

第三方配置中心配置同步脚本

\n

上周末才写好,已经提交 PR 上去了,还处于 review 中,预估会在 Seata 1.0 版本提供给大家使用,敬请期待。

\n

具体位置在 Seata 项目的 script 目录中:

\n

\"\"

\n

config.txt 为本地配置好的值,搭建好第三方配置中心之后,运行脚本会将 config.txt 的配置同步到第三方配置中心。

\n

作者简介

\n

张乘辉,目前就职于中通科技信息中心技术平台部,担任 Java 工程师,主要负责中通消息平台与全链路压测项目的研发,热爱分享技术,微信公众号「后端进阶」作者,技术博客(https://objcoding.com/)博主,Seata Contributor,GitHub ID:objcoding。

\n", - "link": "/zh-cn/blog/seata-config-center.html", - "meta": { - "title": "Seata 配置中心实现原理", - "author": "张乘辉", - "keywords": "Seata、Config", - "description": "Seata 可以支持多个第三方配置中心,那么 Seata 是如何同时兼容那么多个配置中心的呢?", - "date": "2019/12/12" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-mybatisplus-analysis.html b/zh-cn/blog/seata-mybatisplus-analysis.html deleted file mode 100644 index ac8ce1e1..00000000 --- a/zh-cn/blog/seata-mybatisplus-analysis.html +++ /dev/null @@ -1,539 +0,0 @@ - - - - - - - - - - 透过源码解决SeataAT模式整合Mybatis-Plus失去MP特性的问题 - - - - -

透过源码解决SeataAT模式整合Mybatis-Plus失去MP特性的问题

-

项目地址:https://gitee.com/itCjb/springboot-dubbo-mybatisplus-seata

-

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

-

介绍

-

Mybatis-Plus:MyBatis-Plus(简称 MP)是一个 MyBatis 的增强工具,在 MyBatis 的基础上只做增强不做改变,为简化开发、提高效率而生。

-

MP配置:

-
<bean id="sqlSessionFactory" class="com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean">
-    <property name="dataSource" ref="dataSource"/>
-</bean>
-
-

Seata:Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

-

AT模式机制:

-
    -
  • 一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。
  • -
  • 二阶段: -
      -
    • 提交异步化,非常快速地完成。
    • -
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • -
    -
  • -
-

分析原因

-

​ 1.首先我们通过介绍,可以看到,mp是需要注册sqlSessionFactory,注入数据源,而Seata是通过代理数据源来保证事务的正常回滚跟提交。

-

​ 2.我们来看基于seata的官方demo提供的SeataAutoConfig的代码

-
package org.test.config;
- 
-import javax.sql.DataSource; 
- 
-import org.apache.ibatis.session.SqlSessionFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Primary;
- 
-import com.alibaba.druid.pool.DruidDataSource;
-import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
- 
-import io.seata.rm.datasource.DataSourceProxy;
-import io.seata.spring.annotation.GlobalTransactionScanner;
- 
-@Configuration
-public class SeataAutoConfig {
-	@Autowired(required = true)
-	private DataSourceProperties dataSourceProperties;
-	private final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);
- 
-	@Bean(name = "dataSource") // 声明其为Bean实例
-	@Primary // 在同样的DataSource中,首先使用被标注的DataSource
-	public DataSource druidDataSource() {
-		DruidDataSource druidDataSource = new DruidDataSource();
-		logger.info("dataSourceProperties.getUrl():{}",dataSourceProperties.getUrl());
-		druidDataSource.setUrl(dataSourceProperties.getUrl());
-		druidDataSource.setUsername(dataSourceProperties.getUsername());
-		druidDataSource.setPassword(dataSourceProperties.getPassword());
-		druidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());
-		druidDataSource.setInitialSize(0);
-		druidDataSource.setMaxActive(180);
-		druidDataSource.setMaxWait(60000);
-		druidDataSource.setMinIdle(0);
-		druidDataSource.setValidationQuery("Select 1 from DUAL");
-		druidDataSource.setTestOnBorrow(false);
-		druidDataSource.setTestOnReturn(false);
-		druidDataSource.setTestWhileIdle(true);
-		druidDataSource.setTimeBetweenEvictionRunsMillis(60000);
-		druidDataSource.setMinEvictableIdleTimeMillis(25200000);
-		druidDataSource.setRemoveAbandoned(true);
-		druidDataSource.setRemoveAbandonedTimeout(1800);
-		druidDataSource.setLogAbandoned(true);
-		logger.info("装载dataSource........");
-		return druidDataSource;
-	}
- 
-	/**
-	 * init datasource proxy
-	 * 
-	 * @Param: druidDataSource datasource bean instance
-	 * @Return: DataSourceProxy datasource proxy
-	 */
-	@Bean
-	public DataSourceProxy dataSourceProxy(DataSource dataSource) {
-		logger.info("代理dataSource........");
-		return new DataSourceProxy(dataSource);
-	}
- 
-	@Bean
-	public SqlSessionFactory sqlSessionFactory(DataSourceProxy dataSourceProxy) throws Exception {
-		MybatisSqlSessionFactoryBean factory = new MybatisSqlSessionFactoryBean();
-		factory.setDataSource(dataSourceProxy);
-        factory.setMapperLocations(new PathMatchingResourcePatternResolver()
-            .getResources("classpath*:/mapper/*.xml"));
-		return factory.getObject();
-	}
- 
-	/**
-	 * init global transaction scanner
-	 *
-	 * @Return: GlobalTransactionScanner
-	 */
-	@Bean
-	public GlobalTransactionScanner globalTransactionScanner() {
-		logger.info("配置seata........");
-		return new GlobalTransactionScanner("test-service", "test-group");
-	}
-}
-
-
-

首先看到我们的 seata 配置数据源的类里,我们配置了一个数据源,然后又配置了一个 seata 代理 datasource 的 bean。

-

然后我们如果直接启动mp整合seata的项目会发现,分页之类的插件会直接失效,连扫描mapper都得从代码上写,这是为什么呢?

-

通过阅读以上代码可以发现,是因为我们另外配置了一个 sqlSessionFactory,导致 mp 自己的 sqlSessionFactory 失效了(MP 自动配置里的 sqlSessionFactory 方法带有 @ConditionalOnMissingBean,容器里一旦已有 SqlSessionFactory 就不会再生效,下文的 MP 源码中可以看到)。这时候我们发现了问题所在:即使我们不配置 sqlSessionFactory,也会因为 mp 所使用的数据源不是被 seata 代理过后的数据源,导致分布式事务失效。但是如何解决这个问题呢?

-

这时候我们需要去阅读mp的源码,找到他的启动类,一看便知

-
/*
- * Copyright (c) 2011-2020, baomidou (jobob@qq.com).
- * <p>
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- * <p>
- * https://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package com.baomidou.mybatisplus.autoconfigure;
- 
- 
-import com.baomidou.mybatisplus.core.MybatisConfiguration;
-import com.baomidou.mybatisplus.core.config.GlobalConfig;
-import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
-import com.baomidou.mybatisplus.core.incrementer.IKeyGenerator;
-import com.baomidou.mybatisplus.core.injector.ISqlInjector;
-import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
-import org.apache.ibatis.annotations.Mapper;
-import org.apache.ibatis.mapping.DatabaseIdProvider;
-import org.apache.ibatis.plugin.Interceptor;
-import org.apache.ibatis.scripting.LanguageDriver;
-import org.apache.ibatis.session.ExecutorType;
-import org.apache.ibatis.session.SqlSessionFactory;
-import org.apache.ibatis.type.TypeHandler;
-import org.mybatis.spring.SqlSessionFactoryBean;
-import org.mybatis.spring.SqlSessionTemplate;
-import org.mybatis.spring.mapper.MapperFactoryBean;
-import org.mybatis.spring.mapper.MapperScannerConfigurer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.BeanWrapper;
-import org.springframework.beans.BeanWrapperImpl;
-import org.springframework.beans.factory.BeanFactory;
-import org.springframework.beans.factory.BeanFactoryAware;
-import org.springframework.beans.factory.InitializingBean;
-import org.springframework.beans.factory.ObjectProvider;
-import org.springframework.beans.factory.support.BeanDefinitionBuilder;
-import org.springframework.beans.factory.support.BeanDefinitionRegistry;
-import org.springframework.boot.autoconfigure.AutoConfigurationPackages;
-import org.springframework.boot.autoconfigure.AutoConfigureAfter;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
-import org.springframework.boot.autoconfigure.condition.ConditionalOnSingleCandidate;
-import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
-import org.springframework.boot.context.properties.EnableConfigurationProperties;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Import;
-import org.springframework.context.annotation.ImportBeanDefinitionRegistrar;
-import org.springframework.core.io.Resource;
-import org.springframework.core.io.ResourceLoader;
-import org.springframework.core.type.AnnotationMetadata;
-import org.springframework.util.Assert;
-import org.springframework.util.CollectionUtils;
-import org.springframework.util.ObjectUtils;
-import org.springframework.util.StringUtils;
- 
-import javax.sql.DataSource;
-import java.util.List;
-import java.util.Optional;
-import java.util.stream.Stream;
- 
-/**
- * {@link EnableAutoConfiguration Auto-Configuration} for Mybatis. Contributes a
- * {@link SqlSessionFactory} and a {@link SqlSessionTemplate}.
- * <p>
- * If {@link org.mybatis.spring.annotation.MapperScan} is used, or a
- * configuration file is specified as a property, those will be considered,
- * otherwise this auto-configuration will attempt to register mappers based on
- * the interface definitions in or under the root auto-configuration package.
- * </p>
- * <p> copy from {@link org.mybatis.spring.boot.autoconfigure.MybatisAutoConfiguration}</p>
- *
- * @author Eddú Meléndez
- * @author Josh Long
- * @author Kazuki Shimizu
- * @author Eduardo Macarrón
- */
-@Configuration
-@ConditionalOnClass({SqlSessionFactory.class, SqlSessionFactoryBean.class})
-@ConditionalOnSingleCandidate(DataSource.class)
-@EnableConfigurationProperties(MybatisPlusProperties.class)
-@AutoConfigureAfter(DataSourceAutoConfiguration.class)
-public class MybatisPlusAutoConfiguration implements InitializingBean {
- 
-    private static final Logger logger = LoggerFactory.getLogger(MybatisPlusAutoConfiguration.class);
- 
-    private final MybatisPlusProperties properties;
- 
-    private final Interceptor[] interceptors;
- 
-    private final TypeHandler[] typeHandlers;
- 
-    private final LanguageDriver[] languageDrivers;
- 
-    private final ResourceLoader resourceLoader;
- 
-    private final DatabaseIdProvider databaseIdProvider;
- 
-    private final List<ConfigurationCustomizer> configurationCustomizers;
- 
-    private final List<MybatisPlusPropertiesCustomizer> mybatisPlusPropertiesCustomizers;
- 
-    private final ApplicationContext applicationContext;
- 
- 
-    public MybatisPlusAutoConfiguration(MybatisPlusProperties properties,
-                                        ObjectProvider<Interceptor[]> interceptorsProvider,
-                                        ObjectProvider<TypeHandler[]> typeHandlersProvider,
-                                        ObjectProvider<LanguageDriver[]> languageDriversProvider,
-                                        ResourceLoader resourceLoader,
-                                        ObjectProvider<DatabaseIdProvider> databaseIdProvider,
-                                        ObjectProvider<List<ConfigurationCustomizer>> configurationCustomizersProvider,
-                                        ObjectProvider<List<MybatisPlusPropertiesCustomizer>> mybatisPlusPropertiesCustomizerProvider,
-                                        ApplicationContext applicationContext) {
-        this.properties = properties;
-        this.interceptors = interceptorsProvider.getIfAvailable();
-        this.typeHandlers = typeHandlersProvider.getIfAvailable();
-        this.languageDrivers = languageDriversProvider.getIfAvailable();
-        this.resourceLoader = resourceLoader;
-        this.databaseIdProvider = databaseIdProvider.getIfAvailable();
-        this.configurationCustomizers = configurationCustomizersProvider.getIfAvailable();
-        this.mybatisPlusPropertiesCustomizers = mybatisPlusPropertiesCustomizerProvider.getIfAvailable();
-        this.applicationContext = applicationContext;
-    }
- 
-    @Override
-    public void afterPropertiesSet() {
-        if (!CollectionUtils.isEmpty(mybatisPlusPropertiesCustomizers)) {
-            mybatisPlusPropertiesCustomizers.forEach(i -> i.customize(properties));
-        }
-        checkConfigFileExists();
-    }
- 
-    private void checkConfigFileExists() {
-        if (this.properties.isCheckConfigLocation() && StringUtils.hasText(this.properties.getConfigLocation())) {
-            Resource resource = this.resourceLoader.getResource(this.properties.getConfigLocation());
-            Assert.state(resource.exists(),
-                "Cannot find config location: " + resource + " (please add config file or check your Mybatis configuration)");
-        }
-    }
- 
-    @SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
-    @Bean
-    @ConditionalOnMissingBean
-    public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
-        // TODO 使用 MybatisSqlSessionFactoryBean 而不是 SqlSessionFactoryBean
-        MybatisSqlSessionFactoryBean factory = new MybatisSqlSessionFactoryBean();
-        factory.setDataSource(dataSource);
-        factory.setVfs(SpringBootVFS.class);
-        if (StringUtils.hasText(this.properties.getConfigLocation())) {
-            factory.setConfigLocation(this.resourceLoader.getResource(this.properties.getConfigLocation()));
-        }
-        applyConfiguration(factory);
-        if (this.properties.getConfigurationProperties() != null) {
-            factory.setConfigurationProperties(this.properties.getConfigurationProperties());
-        }
-        if (!ObjectUtils.isEmpty(this.interceptors)) {
-            factory.setPlugins(this.interceptors);
-        }
-        if (this.databaseIdProvider != null) {
-            factory.setDatabaseIdProvider(this.databaseIdProvider);
-        }
-        if (StringUtils.hasLength(this.properties.getTypeAliasesPackage())) {
-            factory.setTypeAliasesPackage(this.properties.getTypeAliasesPackage());
-        }
-        if (this.properties.getTypeAliasesSuperType() != null) {
-            factory.setTypeAliasesSuperType(this.properties.getTypeAliasesSuperType());
-        }
-        if (StringUtils.hasLength(this.properties.getTypeHandlersPackage())) {
-            factory.setTypeHandlersPackage(this.properties.getTypeHandlersPackage());
-        }
-        if (!ObjectUtils.isEmpty(this.typeHandlers)) {
-            factory.setTypeHandlers(this.typeHandlers);
-        }
-        if (!ObjectUtils.isEmpty(this.properties.resolveMapperLocations())) {
-            factory.setMapperLocations(this.properties.resolveMapperLocations());
-        }
- 
-        // TODO 对源码做了一定的修改(因为源码适配了老旧的mybatis版本,但我们不需要适配)
-        Class<? extends LanguageDriver> defaultLanguageDriver = this.properties.getDefaultScriptingLanguageDriver();
-        if (!ObjectUtils.isEmpty(this.languageDrivers)) {
-            factory.setScriptingLanguageDrivers(this.languageDrivers);
-        }
-        Optional.ofNullable(defaultLanguageDriver).ifPresent(factory::setDefaultScriptingLanguageDriver);
- 
-        // TODO 自定义枚举包
-        if (StringUtils.hasLength(this.properties.getTypeEnumsPackage())) {
-            factory.setTypeEnumsPackage(this.properties.getTypeEnumsPackage());
-        }
-        // TODO 此处必为非 NULL
-        GlobalConfig globalConfig = this.properties.getGlobalConfig();
-        // TODO 注入填充器
-        if (this.applicationContext.getBeanNamesForType(MetaObjectHandler.class,
-            false, false).length > 0) {
-            MetaObjectHandler metaObjectHandler = this.applicationContext.getBean(MetaObjectHandler.class);
-            globalConfig.setMetaObjectHandler(metaObjectHandler);
-        }
-        // TODO 注入主键生成器
-        if (this.applicationContext.getBeanNamesForType(IKeyGenerator.class, false,
-            false).length > 0) {
-            IKeyGenerator keyGenerator = this.applicationContext.getBean(IKeyGenerator.class);
-            globalConfig.getDbConfig().setKeyGenerator(keyGenerator);
-        }
-        // TODO 注入sql注入器
-        if (this.applicationContext.getBeanNamesForType(ISqlInjector.class, false,
-            false).length > 0) {
-            ISqlInjector iSqlInjector = this.applicationContext.getBean(ISqlInjector.class);
-            globalConfig.setSqlInjector(iSqlInjector);
-        }
-        // TODO 设置 GlobalConfig 到 MybatisSqlSessionFactoryBean
-        factory.setGlobalConfig(globalConfig);
-        return factory.getObject();
-    }
- 
-    // TODO 入参使用 MybatisSqlSessionFactoryBean
-    private void applyConfiguration(MybatisSqlSessionFactoryBean factory) {
-        // TODO 使用 MybatisConfiguration
-        MybatisConfiguration configuration = this.properties.getConfiguration();
-        if (configuration == null && !StringUtils.hasText(this.properties.getConfigLocation())) {
-            configuration = new MybatisConfiguration();
-        }
-        if (configuration != null && !CollectionUtils.isEmpty(this.configurationCustomizers)) {
-            for (ConfigurationCustomizer customizer : this.configurationCustomizers) {
-                customizer.customize(configuration);
-            }
-        }
-        factory.setConfiguration(configuration);
-    }
- 
-    @Bean
-    @ConditionalOnMissingBean
-    public SqlSessionTemplate sqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {
-        ExecutorType executorType = this.properties.getExecutorType();
-        if (executorType != null) {
-            return new SqlSessionTemplate(sqlSessionFactory, executorType);
-        } else {
-            return new SqlSessionTemplate(sqlSessionFactory);
-        }
-    }
- 
-    /**
-     * This will just scan the same base package as Spring Boot does. If you want more power, you can explicitly use
-     * {@link org.mybatis.spring.annotation.MapperScan} but this will get typed mappers working correctly, out-of-the-box,
-     * similar to using Spring Data JPA repositories.
-     */
-    public static class AutoConfiguredMapperScannerRegistrar implements BeanFactoryAware, ImportBeanDefinitionRegistrar {
- 
-        private BeanFactory beanFactory;
- 
-        @Override
-        public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata, BeanDefinitionRegistry registry) {
- 
-            if (!AutoConfigurationPackages.has(this.beanFactory)) {
-                logger.debug("Could not determine auto-configuration package, automatic mapper scanning disabled.");
-                return;
-            }
- 
-            logger.debug("Searching for mappers annotated with @Mapper");
- 
-            List<String> packages = AutoConfigurationPackages.get(this.beanFactory);
-            if (logger.isDebugEnabled()) {
-                packages.forEach(pkg -> logger.debug("Using auto-configuration base package '{}'", pkg));
-            }
- 
-            BeanDefinitionBuilder builder = BeanDefinitionBuilder.genericBeanDefinition(MapperScannerConfigurer.class);
-            builder.addPropertyValue("processPropertyPlaceHolders", true);
-            builder.addPropertyValue("annotationClass", Mapper.class);
-            builder.addPropertyValue("basePackage", StringUtils.collectionToCommaDelimitedString(packages));
-            BeanWrapper beanWrapper = new BeanWrapperImpl(MapperScannerConfigurer.class);
-            Stream.of(beanWrapper.getPropertyDescriptors())
-                // Need to mybatis-spring 2.0.2+
-                .filter(x -> x.getName().equals("lazyInitialization")).findAny()
-                .ifPresent(x -> builder.addPropertyValue("lazyInitialization", "${mybatis.lazy-initialization:false}"));
-            registry.registerBeanDefinition(MapperScannerConfigurer.class.getName(), builder.getBeanDefinition());
-        }
- 
-        @Override
-        public void setBeanFactory(BeanFactory beanFactory) {
-            this.beanFactory = beanFactory;
-        }
-    }
- 
-    /**
-     * If mapper registering configuration or mapper scanning configuration not present, this configuration allow to scan
-     * mappers based on the same component-scanning path as Spring Boot itself.
-     */
-    @Configuration
-    @Import(AutoConfiguredMapperScannerRegistrar.class)
-    @ConditionalOnMissingBean({MapperFactoryBean.class, MapperScannerConfigurer.class})
-    public static class MapperScannerRegistrarNotFoundConfiguration implements InitializingBean {
- 
-        @Override
-        public void afterPropertiesSet() {
-            logger.debug(
-                "Not found configuration for registering mapper bean using @MapperScan, MapperFactoryBean and MapperScannerConfigurer.");
-        }
-    }
-}
-
-
-

看到 mp 启动类里的 sqlSessionFactory 方法了吗?它也是一样地注入一个数据源,这时候大家应该都知道解决方法了吧?

-

没错,就是把被代理过的数据源给放到mp的sqlSessionFactory中.

-

很简单,我们需要稍微改动一下我们的seata配置类就行了

-
package org.test.config;
-
-import javax.sql.DataSource;
-
-import org.mybatis.spring.annotation.MapperScan;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Primary;
-
-import com.alibaba.druid.pool.DruidDataSource;
-
-import io.seata.rm.datasource.DataSourceProxy;
-import io.seata.spring.annotation.GlobalTransactionScanner;
-
-@Configuration
-@MapperScan("com.baomidou.springboot.mapper*")
-public class SeataAutoConfig {
-    @Autowired(required = true)
-    private DataSourceProperties dataSourceProperties;
-    private final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);
-    private DataSourceProxy dataSourceProxy;
-
-    @Bean(name = "dataSource") // 声明其为Bean实例
-    @Primary // 在同样的DataSource中,首先使用被标注的DataSource
-    public DataSource druidDataSource() {
-        DruidDataSource druidDataSource = new DruidDataSource();
-        logger.info("dataSourceProperties.getUrl():{}", dataSourceProperties.getUrl());
-        druidDataSource.setUrl(dataSourceProperties.getUrl());
-        druidDataSource.setUsername(dataSourceProperties.getUsername());
-        druidDataSource.setPassword(dataSourceProperties.getPassword());
-        druidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());
-        druidDataSource.setInitialSize(0);
-        druidDataSource.setMaxActive(180);
-        druidDataSource.setMaxWait(60000);
-        druidDataSource.setMinIdle(0);
-        druidDataSource.setValidationQuery("Select 1 from DUAL");
-        druidDataSource.setTestOnBorrow(false);
-        druidDataSource.setTestOnReturn(false);
-        druidDataSource.setTestWhileIdle(true);
-        druidDataSource.setTimeBetweenEvictionRunsMillis(60000);
-        druidDataSource.setMinEvictableIdleTimeMillis(25200000);
-        druidDataSource.setRemoveAbandoned(true);
-        druidDataSource.setRemoveAbandonedTimeout(1800);
-        druidDataSource.setLogAbandoned(true);
-        logger.info("装载dataSource........");
-        dataSourceProxy = new DataSourceProxy(druidDataSource);
-        return dataSourceProxy;
-    }
-
-    /**
-     * init datasource proxy
-     * 
-     * @Param: druidDataSource datasource bean instance
-     * @Return: DataSourceProxy datasource proxy
-     */
-    @Bean
-    public DataSourceProxy dataSourceProxy() {
-        logger.info("代理dataSource........");
-        return dataSourceProxy;
-    }
-
-    /**
-     * init global transaction scanner
-     *
-     * @Return: GlobalTransactionScanner
-     */
-    @Bean
-    public GlobalTransactionScanner globalTransactionScanner() {
-        logger.info("配置seata........");
-        return new GlobalTransactionScanner("test-service", "test-group");
-    }
-}
-
-
-

看代码,我们去掉了自己配置的 sqlSessionFactory,直接让 dataSource 这个 bean 返回被 seata 代理过的数据源,并且加上了 @Primary,让 mp 优先使用我们配置的这个数据源,这样就解决了因为 seata 代理数据源、又另建 sqlSessionFactory 而导致 mp 的插件、组件失效的问题!

-

总结

-

踩到坑不可怕,只要有耐心地顺着每个组件实现的原理去思考,查找对应冲突的代码块,你一定能找到一个兼容二者的方法。

-
- - - - - - - diff --git a/zh-cn/blog/seata-mybatisplus-analysis.json b/zh-cn/blog/seata-mybatisplus-analysis.json deleted file mode 100644 index 5283329c..00000000 --- a/zh-cn/blog/seata-mybatisplus-analysis.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-mybatisplus-analysis.md", - "__html": "

透过源码解决SeataAT模式整合Mybatis-Plus失去MP特性的问题

\n

项目地址:https://gitee.com/itCjb/springboot-dubbo-mybatisplus-seata

\n

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

\n

介绍

\n

Mybatis-Plus:MyBatis-Plus(简称 MP)是一个 MyBatis 的增强工具,在 MyBatis 的基础上只做增强不做改变,为简化开发、提高效率而生。

\n

MP配置:

\n
<bean id=\"sqlSessionFactory\" class=\"com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean\">\n    <property name=\"dataSource\" ref=\"dataSource\"/>\n</bean>\n
\n

Seata:Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

\n

AT模式机制:

\n
    \n
  • 一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。
  • \n
  • 二阶段:\n
      \n
    • 提交异步化,非常快速地完成。
    • \n
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • \n
    \n
  • \n
\n

分析原因

\n

​\t1.首先我们通过介绍,可以看到,mp是需要注册sqlSessionFactory,注入数据源,而Seata是通过代理数据源来保证事务的正常回滚跟提交。

\n

​\t2.我们来看基于seata的官方demo提供的SeataAutoConfig的代码

\n
package org.test.config;\n \nimport javax.sql.DataSource; \n \nimport org.apache.ibatis.session.SqlSessionFactory;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Primary;\n \nimport com.alibaba.druid.pool.DruidDataSource;\nimport com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;\n \nimport io.seata.rm.datasource.DataSourceProxy;\nimport io.seata.spring.annotation.GlobalTransactionScanner;\n \n@Configuration\npublic class SeataAutoConfig {\n\t@Autowired(required = true)\n\tprivate DataSourceProperties dataSourceProperties;\n\tprivate final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);\n \n\t@Bean(name = \"dataSource\") // 声明其为Bean实例\n\t@Primary // 在同样的DataSource中,首先使用被标注的DataSource\n\tpublic DataSource druidDataSource() {\n\t\tDruidDataSource druidDataSource = new DruidDataSource();\n\t\tlogger.info(\"dataSourceProperties.getUrl():{}\",dataSourceProperties.getUrl());\n\t\tdruidDataSource.setUrl(dataSourceProperties.getUrl());\n\t\tdruidDataSource.setUsername(dataSourceProperties.getUsername());\n\t\tdruidDataSource.setPassword(dataSourceProperties.getPassword());\n\t\tdruidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());\n\t\tdruidDataSource.setInitialSize(0);\n\t\tdruidDataSource.setMaxActive(180);\n\t\tdruidDataSource.setMaxWait(60000);\n\t\tdruidDataSource.setMinIdle(0);\n\t\tdruidDataSource.setValidationQuery(\"Select 1 from DUAL\");\n\t\tdruidDataSource.setTestOnBorrow(false);\n\t\tdruidDataSource.setTestOnReturn(false);\n\t\tdruidDataSource.setTestWhileIdle(true);\n\t\tdruidDataSource.setTimeBetweenEvictionRunsMillis(60000);\n\t\tdruidDataSource.setMinEvictableIdleTimeMillis(25200000);\n\t\tdruidDataSource.setRemoveAbandoned(true);\n\t\tdruidDataSource.setRemoveAbandonedTimeout(1800);\n\t\tdruidDataSource.setLogAbandoned(true);\n\t\tlogger.info(\"装载dataSource........\");\n\t\treturn druidDataSource;\n\t}\n \n\t/**\n\t * init datasource proxy\n\t * \n\t * @Param: druidDataSource datasource bean instance\n\t * @Return: DataSourceProxy datasource proxy\n\t */\n\t@Bean\n\tpublic DataSourceProxy dataSourceProxy(DataSource dataSource) {\n\t\tlogger.info(\"代理dataSource........\");\n\t\treturn new DataSourceProxy(dataSource);\n\t}\n \n\t@Bean\n\tpublic SqlSessionFactory sqlSessionFactory(DataSourceProxy dataSourceProxy) throws Exception {\n\t\tMybatisSqlSessionFactoryBean factory = new MybatisSqlSessionFactoryBean();\n\t\tfactory.setDataSource(dataSourceProxy);\n        factory.setMapperLocations(new PathMatchingResourcePatternResolver()\n            .getResources(\"classpath*:/mapper/*.xml\"));\n\t\treturn factory.getObject();\n\t}\n \n\t/**\n\t * init global transaction scanner\n\t *\n\t * @Return: GlobalTransactionScanner\n\t */\n\t@Bean\n\tpublic GlobalTransactionScanner globalTransactionScanner() {\n\t\tlogger.info(\"配置seata........\");\n\t\treturn new GlobalTransactionScanner(\"test-service\", \"test-group\");\n\t}\n}\n\n

First, in our Seata data-source configuration class we configure a data source and then a second bean that wraps it in Seata's DataSourceProxy.

If we now start the MP + Seata project as is, we find that plugins such as pagination simply stop working, and even mapper scanning has to be wired up in code. Why is that?

Reading the code above, the reason is that we configured an additional sqlSessionFactory, which overrides MP's own sqlSessionFactory. And even if we did not configure one ourselves, the data source MP uses would still not be the one proxied by Seata, so the distributed transaction would not take effect. How do we solve this?
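As a side note: if you do keep a hand-written MybatisSqlSessionFactoryBean, everything that MP's auto-configuration normally registers has to be added back by hand, for example the pagination plugin. A rough sketch of that burden, assuming MP 3.2.x (this is exactly what the rest of this post avoids):

import javax.sql.DataSource;

import org.apache.ibatis.plugin.Interceptor;
import org.apache.ibatis.session.SqlSessionFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;

import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;

@Configuration
public class ManualMpConfig {

    @Bean
    public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
        MybatisSqlSessionFactoryBean factory = new MybatisSqlSessionFactoryBean();
        factory.setDataSource(dataSource); // should be the Seata DataSourceProxy, see below
        // every MP plugin (pagination, optimistic locking, ...) must now be re-registered manually
        factory.setPlugins(new Interceptor[]{ new PaginationInterceptor() });
        factory.setMapperLocations(new PathMatchingResourcePatternResolver()
                .getResources("classpath*:/mapper/*.xml"));
        return factory.getObject();
    }
}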

At this point we need to read the MP source code and find its auto-configuration class; one look makes everything clear:

\n
/*\n * Copyright (c) 2011-2020, baomidou (jobob@qq.com).\n * <p>\n * Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n * use this file except in compliance with the License. You may obtain a copy of\n * the License at\n * <p>\n * https://www.apache.org/licenses/LICENSE-2.0\n * <p>\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n * License for the specific language governing permissions and limitations under\n * the License.\n */\npackage com.baomidou.mybatisplus.autoconfigure;\n \n \nimport com.baomidou.mybatisplus.core.MybatisConfiguration;\nimport com.baomidou.mybatisplus.core.config.GlobalConfig;\nimport com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;\nimport com.baomidou.mybatisplus.core.incrementer.IKeyGenerator;\nimport com.baomidou.mybatisplus.core.injector.ISqlInjector;\nimport com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;\nimport org.apache.ibatis.annotations.Mapper;\nimport org.apache.ibatis.mapping.DatabaseIdProvider;\nimport org.apache.ibatis.plugin.Interceptor;\nimport org.apache.ibatis.scripting.LanguageDriver;\nimport org.apache.ibatis.session.ExecutorType;\nimport org.apache.ibatis.session.SqlSessionFactory;\nimport org.apache.ibatis.type.TypeHandler;\nimport org.mybatis.spring.SqlSessionFactoryBean;\nimport org.mybatis.spring.SqlSessionTemplate;\nimport org.mybatis.spring.mapper.MapperFactoryBean;\nimport org.mybatis.spring.mapper.MapperScannerConfigurer;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.beans.BeanWrapper;\nimport org.springframework.beans.BeanWrapperImpl;\nimport org.springframework.beans.factory.BeanFactory;\nimport org.springframework.beans.factory.BeanFactoryAware;\nimport org.springframework.beans.factory.InitializingBean;\nimport org.springframework.beans.factory.ObjectProvider;\nimport org.springframework.beans.factory.support.BeanDefinitionBuilder;\nimport org.springframework.beans.factory.support.BeanDefinitionRegistry;\nimport org.springframework.boot.autoconfigure.AutoConfigurationPackages;\nimport org.springframework.boot.autoconfigure.AutoConfigureAfter;\nimport org.springframework.boot.autoconfigure.EnableAutoConfiguration;\nimport org.springframework.boot.autoconfigure.condition.ConditionalOnClass;\nimport org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;\nimport org.springframework.boot.autoconfigure.condition.ConditionalOnSingleCandidate;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;\nimport org.springframework.boot.context.properties.EnableConfigurationProperties;\nimport org.springframework.context.ApplicationContext;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Import;\nimport org.springframework.context.annotation.ImportBeanDefinitionRegistrar;\nimport org.springframework.core.io.Resource;\nimport org.springframework.core.io.ResourceLoader;\nimport org.springframework.core.type.AnnotationMetadata;\nimport org.springframework.util.Assert;\nimport org.springframework.util.CollectionUtils;\nimport org.springframework.util.ObjectUtils;\nimport org.springframework.util.StringUtils;\n \nimport javax.sql.DataSource;\nimport java.util.List;\nimport java.util.Optional;\nimport 
java.util.stream.Stream;\n \n/**\n * {@link EnableAutoConfiguration Auto-Configuration} for Mybatis. Contributes a\n * {@link SqlSessionFactory} and a {@link SqlSessionTemplate}.\n * <p>\n * If {@link org.mybatis.spring.annotation.MapperScan} is used, or a\n * configuration file is specified as a property, those will be considered,\n * otherwise this auto-configuration will attempt to register mappers based on\n * the interface definitions in or under the root auto-configuration package.\n * </p>\n * <p> copy from {@link org.mybatis.spring.boot.autoconfigure.MybatisAutoConfiguration}</p>\n *\n * @author Eddú Meléndez\n * @author Josh Long\n * @author Kazuki Shimizu\n * @author Eduardo Macarrón\n */\n@Configuration\n@ConditionalOnClass({SqlSessionFactory.class, SqlSessionFactoryBean.class})\n@ConditionalOnSingleCandidate(DataSource.class)\n@EnableConfigurationProperties(MybatisPlusProperties.class)\n@AutoConfigureAfter(DataSourceAutoConfiguration.class)\npublic class MybatisPlusAutoConfiguration implements InitializingBean {\n \n    private static final Logger logger = LoggerFactory.getLogger(MybatisPlusAutoConfiguration.class);\n \n    private final MybatisPlusProperties properties;\n \n    private final Interceptor[] interceptors;\n \n    private final TypeHandler[] typeHandlers;\n \n    private final LanguageDriver[] languageDrivers;\n \n    private final ResourceLoader resourceLoader;\n \n    private final DatabaseIdProvider databaseIdProvider;\n \n    private final List<ConfigurationCustomizer> configurationCustomizers;\n \n    private final List<MybatisPlusPropertiesCustomizer> mybatisPlusPropertiesCustomizers;\n \n    private final ApplicationContext applicationContext;\n \n \n    public MybatisPlusAutoConfiguration(MybatisPlusProperties properties,\n                                        ObjectProvider<Interceptor[]> interceptorsProvider,\n                                        ObjectProvider<TypeHandler[]> typeHandlersProvider,\n                                        ObjectProvider<LanguageDriver[]> languageDriversProvider,\n                                        ResourceLoader resourceLoader,\n                                        ObjectProvider<DatabaseIdProvider> databaseIdProvider,\n                                        ObjectProvider<List<ConfigurationCustomizer>> configurationCustomizersProvider,\n                                        ObjectProvider<List<MybatisPlusPropertiesCustomizer>> mybatisPlusPropertiesCustomizerProvider,\n                                        ApplicationContext applicationContext) {\n        this.properties = properties;\n        this.interceptors = interceptorsProvider.getIfAvailable();\n        this.typeHandlers = typeHandlersProvider.getIfAvailable();\n        this.languageDrivers = languageDriversProvider.getIfAvailable();\n        this.resourceLoader = resourceLoader;\n        this.databaseIdProvider = databaseIdProvider.getIfAvailable();\n        this.configurationCustomizers = configurationCustomizersProvider.getIfAvailable();\n        this.mybatisPlusPropertiesCustomizers = mybatisPlusPropertiesCustomizerProvider.getIfAvailable();\n        this.applicationContext = applicationContext;\n    }\n \n    @Override\n    public void afterPropertiesSet() {\n        if (!CollectionUtils.isEmpty(mybatisPlusPropertiesCustomizers)) {\n            mybatisPlusPropertiesCustomizers.forEach(i -> i.customize(properties));\n        }\n        checkConfigFileExists();\n    }\n \n    private void checkConfigFileExists() {\n        if 
(this.properties.isCheckConfigLocation() && StringUtils.hasText(this.properties.getConfigLocation())) {\n            Resource resource = this.resourceLoader.getResource(this.properties.getConfigLocation());\n            Assert.state(resource.exists(),\n                \"Cannot find config location: \" + resource + \" (please add config file or check your Mybatis configuration)\");\n        }\n    }\n \n    @SuppressWarnings(\"SpringJavaInjectionPointsAutowiringInspection\")\n    @Bean\n    @ConditionalOnMissingBean\n    public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {\n        // TODO 使用 MybatisSqlSessionFactoryBean 而不是 SqlSessionFactoryBean\n        MybatisSqlSessionFactoryBean factory = new MybatisSqlSessionFactoryBean();\n        factory.setDataSource(dataSource);\n        factory.setVfs(SpringBootVFS.class);\n        if (StringUtils.hasText(this.properties.getConfigLocation())) {\n            factory.setConfigLocation(this.resourceLoader.getResource(this.properties.getConfigLocation()));\n        }\n        applyConfiguration(factory);\n        if (this.properties.getConfigurationProperties() != null) {\n            factory.setConfigurationProperties(this.properties.getConfigurationProperties());\n        }\n        if (!ObjectUtils.isEmpty(this.interceptors)) {\n            factory.setPlugins(this.interceptors);\n        }\n        if (this.databaseIdProvider != null) {\n            factory.setDatabaseIdProvider(this.databaseIdProvider);\n        }\n        if (StringUtils.hasLength(this.properties.getTypeAliasesPackage())) {\n            factory.setTypeAliasesPackage(this.properties.getTypeAliasesPackage());\n        }\n        if (this.properties.getTypeAliasesSuperType() != null) {\n            factory.setTypeAliasesSuperType(this.properties.getTypeAliasesSuperType());\n        }\n        if (StringUtils.hasLength(this.properties.getTypeHandlersPackage())) {\n            factory.setTypeHandlersPackage(this.properties.getTypeHandlersPackage());\n        }\n        if (!ObjectUtils.isEmpty(this.typeHandlers)) {\n            factory.setTypeHandlers(this.typeHandlers);\n        }\n        if (!ObjectUtils.isEmpty(this.properties.resolveMapperLocations())) {\n            factory.setMapperLocations(this.properties.resolveMapperLocations());\n        }\n \n        // TODO 对源码做了一定的修改(因为源码适配了老旧的mybatis版本,但我们不需要适配)\n        Class<? 
extends LanguageDriver> defaultLanguageDriver = this.properties.getDefaultScriptingLanguageDriver();\n        if (!ObjectUtils.isEmpty(this.languageDrivers)) {\n            factory.setScriptingLanguageDrivers(this.languageDrivers);\n        }\n        Optional.ofNullable(defaultLanguageDriver).ifPresent(factory::setDefaultScriptingLanguageDriver);\n \n        // TODO 自定义枚举包\n        if (StringUtils.hasLength(this.properties.getTypeEnumsPackage())) {\n            factory.setTypeEnumsPackage(this.properties.getTypeEnumsPackage());\n        }\n        // TODO 此处必为非 NULL\n        GlobalConfig globalConfig = this.properties.getGlobalConfig();\n        // TODO 注入填充器\n        if (this.applicationContext.getBeanNamesForType(MetaObjectHandler.class,\n            false, false).length > 0) {\n            MetaObjectHandler metaObjectHandler = this.applicationContext.getBean(MetaObjectHandler.class);\n            globalConfig.setMetaObjectHandler(metaObjectHandler);\n        }\n        // TODO 注入主键生成器\n        if (this.applicationContext.getBeanNamesForType(IKeyGenerator.class, false,\n            false).length > 0) {\n            IKeyGenerator keyGenerator = this.applicationContext.getBean(IKeyGenerator.class);\n            globalConfig.getDbConfig().setKeyGenerator(keyGenerator);\n        }\n        // TODO 注入sql注入器\n        if (this.applicationContext.getBeanNamesForType(ISqlInjector.class, false,\n            false).length > 0) {\n            ISqlInjector iSqlInjector = this.applicationContext.getBean(ISqlInjector.class);\n            globalConfig.setSqlInjector(iSqlInjector);\n        }\n        // TODO 设置 GlobalConfig 到 MybatisSqlSessionFactoryBean\n        factory.setGlobalConfig(globalConfig);\n        return factory.getObject();\n    }\n \n    // TODO 入参使用 MybatisSqlSessionFactoryBean\n    private void applyConfiguration(MybatisSqlSessionFactoryBean factory) {\n        // TODO 使用 MybatisConfiguration\n        MybatisConfiguration configuration = this.properties.getConfiguration();\n        if (configuration == null && !StringUtils.hasText(this.properties.getConfigLocation())) {\n            configuration = new MybatisConfiguration();\n        }\n        if (configuration != null && !CollectionUtils.isEmpty(this.configurationCustomizers)) {\n            for (ConfigurationCustomizer customizer : this.configurationCustomizers) {\n                customizer.customize(configuration);\n            }\n        }\n        factory.setConfiguration(configuration);\n    }\n \n    @Bean\n    @ConditionalOnMissingBean\n    public SqlSessionTemplate sqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {\n        ExecutorType executorType = this.properties.getExecutorType();\n        if (executorType != null) {\n            return new SqlSessionTemplate(sqlSessionFactory, executorType);\n        } else {\n            return new SqlSessionTemplate(sqlSessionFactory);\n        }\n    }\n \n    /**\n     * This will just scan the same base package as Spring Boot does. 
If you want more power, you can explicitly use\n     * {@link org.mybatis.spring.annotation.MapperScan} but this will get typed mappers working correctly, out-of-the-box,\n     * similar to using Spring Data JPA repositories.\n     */\n    public static class AutoConfiguredMapperScannerRegistrar implements BeanFactoryAware, ImportBeanDefinitionRegistrar {\n \n        private BeanFactory beanFactory;\n \n        @Override\n        public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata, BeanDefinitionRegistry registry) {\n \n            if (!AutoConfigurationPackages.has(this.beanFactory)) {\n                logger.debug(\"Could not determine auto-configuration package, automatic mapper scanning disabled.\");\n                return;\n            }\n \n            logger.debug(\"Searching for mappers annotated with @Mapper\");\n \n            List<String> packages = AutoConfigurationPackages.get(this.beanFactory);\n            if (logger.isDebugEnabled()) {\n                packages.forEach(pkg -> logger.debug(\"Using auto-configuration base package '{}'\", pkg));\n            }\n \n            BeanDefinitionBuilder builder = BeanDefinitionBuilder.genericBeanDefinition(MapperScannerConfigurer.class);\n            builder.addPropertyValue(\"processPropertyPlaceHolders\", true);\n            builder.addPropertyValue(\"annotationClass\", Mapper.class);\n            builder.addPropertyValue(\"basePackage\", StringUtils.collectionToCommaDelimitedString(packages));\n            BeanWrapper beanWrapper = new BeanWrapperImpl(MapperScannerConfigurer.class);\n            Stream.of(beanWrapper.getPropertyDescriptors())\n                // Need to mybatis-spring 2.0.2+\n                .filter(x -> x.getName().equals(\"lazyInitialization\")).findAny()\n                .ifPresent(x -> builder.addPropertyValue(\"lazyInitialization\", \"${mybatis.lazy-initialization:false}\"));\n            registry.registerBeanDefinition(MapperScannerConfigurer.class.getName(), builder.getBeanDefinition());\n        }\n \n        @Override\n        public void setBeanFactory(BeanFactory beanFactory) {\n            this.beanFactory = beanFactory;\n        }\n    }\n \n    /**\n     * If mapper registering configuration or mapper scanning configuration not present, this configuration allow to scan\n     * mappers based on the same component-scanning path as Spring Boot itself.\n     */\n    @Configuration\n    @Import(AutoConfiguredMapperScannerRegistrar.class)\n    @ConditionalOnMissingBean({MapperFactoryBean.class, MapperScannerConfigurer.class})\n    public static class MapperScannerRegistrarNotFoundConfiguration implements InitializingBean {\n \n        @Override\n        public void afterPropertiesSet() {\n            logger.debug(\n                \"Not found configuration for registering mapper bean using @MapperScan, MapperFactoryBean and MapperScannerConfigurer.\");\n        }\n    }\n}\n\n

See the sqlSessionFactory method in MP's auto-configuration class? It likewise just injects a DataSource. By now the solution should be obvious.

Exactly: hand the proxied data source to MP's sqlSessionFactory.

It is simple; we only need to tweak our Seata configuration class a little:

\n
package org.test.config;\n\nimport javax.sql.DataSource;\n\nimport org.mybatis.spring.annotation.MapperScan;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Primary;\n\nimport com.alibaba.druid.pool.DruidDataSource;\n\nimport io.seata.rm.datasource.DataSourceProxy;\nimport io.seata.spring.annotation.GlobalTransactionScanner;\n\n@Configuration\n@MapperScan(\"com.baomidou.springboot.mapper*\")\npublic class SeataAutoConfig {\n    @Autowired(required = true)\n    private DataSourceProperties dataSourceProperties;\n    private final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);\n    private DataSourceProxy dataSourceProxy;\n\n    @Bean(name = \"dataSource\") // 声明其为Bean实例\n    @Primary // 在同样的DataSource中,首先使用被标注的DataSource\n    public DataSource druidDataSource() {\n        DruidDataSource druidDataSource = new DruidDataSource();\n        logger.info(\"dataSourceProperties.getUrl():{}\", dataSourceProperties.getUrl());\n        druidDataSource.setUrl(dataSourceProperties.getUrl());\n        druidDataSource.setUsername(dataSourceProperties.getUsername());\n        druidDataSource.setPassword(dataSourceProperties.getPassword());\n        druidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());\n        druidDataSource.setInitialSize(0);\n        druidDataSource.setMaxActive(180);\n        druidDataSource.setMaxWait(60000);\n        druidDataSource.setMinIdle(0);\n        druidDataSource.setValidationQuery(\"Select 1 from DUAL\");\n        druidDataSource.setTestOnBorrow(false);\n        druidDataSource.setTestOnReturn(false);\n        druidDataSource.setTestWhileIdle(true);\n        druidDataSource.setTimeBetweenEvictionRunsMillis(60000);\n        druidDataSource.setMinEvictableIdleTimeMillis(25200000);\n        druidDataSource.setRemoveAbandoned(true);\n        druidDataSource.setRemoveAbandonedTimeout(1800);\n        druidDataSource.setLogAbandoned(true);\n        logger.info(\"装载dataSource........\");\n        dataSourceProxy = new DataSourceProxy(druidDataSource);\n        return dataSourceProxy;\n    }\n\n    /**\n     * init datasource proxy\n     * \n     * @Param: druidDataSource datasource bean instance\n     * @Return: DataSourceProxy datasource proxy\n     */\n    @Bean\n    public DataSourceProxy dataSourceProxy() {\n        logger.info(\"代理dataSource........\");\n        return dataSourceProxy;\n    }\n\n    /**\n     * init global transaction scanner\n     *\n     * @Return: GlobalTransactionScanner\n     */\n    @Bean\n    public GlobalTransactionScanner globalTransactionScanner() {\n        logger.info(\"配置seata........\");\n        return new GlobalTransactionScanner(\"test-service\", \"test-group\");\n    }\n}\n\n

Looking at the code: we removed our own sqlSessionFactory, made the DataSource bean itself return the proxied instance, and added @Primary so that MP prefers this data source. That fixes the problem where MP's plugins and components stopped working because Seata proxied the data source and a brand-new sqlSessionFactory was created.
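If you want to double-check at runtime that MP really ends up on the proxied data source, a small optional runner like the one below (not part of the demo) can log the injected type at startup:

import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.stereotype.Component;

import io.seata.rm.datasource.DataSourceProxy;

@Component
public class DataSourceProxyChecker implements ApplicationRunner {

    private static final Logger logger = LoggerFactory.getLogger(DataSourceProxyChecker.class);

    private final DataSource dataSource;

    public DataSourceProxyChecker(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    @Override
    public void run(ApplicationArguments args) {
        // with the configuration above this should print io.seata.rm.datasource.DataSourceProxy
        logger.info("Injected DataSource type: {}", dataSource.getClass().getName());
        logger.info("Proxied by Seata: {}", dataSource instanceof DataSourceProxy);
    }
}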


Summary

Hitting a pitfall is not scary. As long as you patiently follow how each component is implemented, think it through, and track down the conflicting code, you will certainly find a way to make the two work together.

\n", - "link": "/zh-cn/blog/seata-mybatisplus-analysis.html", - "meta": { - "title": "透过源码解决SeataAT模式整合Mybatis-Plus失去MP特性的问题", - "keywords": "Seata,Mybatis-Plus,分布式事务", - "description": "本文讲述如何透过源码解决Seata整合Mybatis-Plus失去MP特性的问题", - "author": "FUNKYE", - "date": "2019/11/30" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-nacos-analysis.html b/zh-cn/blog/seata-nacos-analysis.html deleted file mode 100644 index c54b1b86..00000000 --- a/zh-cn/blog/seata-nacos-analysis.html +++ /dev/null @@ -1,397 +0,0 @@ - - - - - - - - - - Seata分布式事务启用Nacos做配置中心 - - - - -

Using Nacos as the Configuration Center for Seata Distributed Transactions

-

Project repository

-

Author: FUNKYE (Chen Jianbin), principal engineer at an internet company in Hangzhou.

-

Preface

-

Last time I published the configuration for running Seata with direct connections; see that blog post for the details.

-

Building on it, we will now configure Nacos as the configuration center and as the Dubbo registry.

-

Preparation

-

1. First, download the latest release from the Nacos GitHub repository.

-

-

2. Once it is downloaded, it is simple: unzip it, go to the bin directory and start it; when you see output like the screenshot, it is up:

-

-

3. Once it has started, open: http://127.0.0.1:8848/nacos/#/login

-

-

Do you see a login page like this? Log in with nacos for both the username and the password and take a look around.

-

At this point you will find that no services are registered yet.

-

(screenshot: 20191202204147)

-

Don't worry, we will have the Seata server connect in a moment.

-

Seata configuration

-

1. Go into Seata's conf folder. Do you see this file (nacos-config.txt)?

-

-

That's the one. Edit it:

-

(screenshot: 20191202204353)

-

(screenshot: 20191202204437)

-

2. Remember to save it! Next, open and edit the registry.conf file:

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "nacos"
-
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-    cluster = "default"
-  }
-  eureka {
-    serviceUrl = "http://localhost:8761/eureka"
-    application = "default"
-    weight = "1"
-  }
-  redis {
-    serverAddr = "localhost:6379"
-    db = "0"
-  }
-  zk {
-    cluster = "default"
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  consul {
-    cluster = "default"
-    serverAddr = "127.0.0.1:8500"
-  }
-  etcd3 {
-    cluster = "default"
-    serverAddr = "http://localhost:2379"
-  }
-  sofa {
-    serverAddr = "127.0.0.1:9603"
-    application = "default"
-    region = "DEFAULT_ZONE"
-    datacenter = "DefaultDataCenter"
-    cluster = "default"
-    group = "SEATA_GROUP"
-    addressWaitTime = "3000"
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "nacos"
-
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-  }
-  consul {
-    serverAddr = "127.0.0.1:8500"
-  }
-  apollo {
-    app.id = "seata-server"
-    apollo.meta = "http://192.168.1.204:8801"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  etcd3 {
-    serverAddr = "http://localhost:2379"
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-
-

Once everything is edited, run nacos-config.sh; the contents of the nacos-config.txt we just configured are pushed to Nacos, as shown below:

-

(screenshot: 20191202205743)

-

Output similar to the above means it worked. Next, log in to the Nacos console and open the configuration list; if you see a list like the one in the screenshot, the configuration was imported successfully:

-

(screenshot: 20191202205912)

-

See? All of your configuration has been pushed. If running the script inside a Git shell does not work for you, try editing the sh file and changing it to the following:

-
for line in $(cat nacos-config.txt)
-
-do
-
-key=${line%%=*}
-value=${line#*=}
-echo "\r\n set "${key}" = "${value}
-
-result=`curl -X POST "http://127.0.0.1:8848/nacos/v1/cs/configs?dataId=$key&group=SEATA_GROUP&content=$value"`
-
-if [ "$result"x == "true"x ]; then
-
-  echo "\033[42;37m $result \033[0m"
-
-else
-
-  echo "\033[41;37 $result \033[0m"
-  let error++
-
-fi
-
-done
-
-
-if [ $error -eq 0 ]; then
-
-echo  "\r\n\033[42;37m init nacos config finished, please start seata-server. \033[0m"
-
-else
-
-echo  "\r\n\033[41;33m init nacos config fail. \033[0m"
-
-fi
-
-
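If the shell script still refuses to run in your environment, the same key/value pairs can also be pushed with the Nacos Java client (the demo's pom below already includes nacos-client). A rough sketch that publishes a single entry, with an example transaction-group key and a hard-coded server address:

import java.util.Properties;

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

public class SeataConfigPublisher {

    public static void main(String[] args) throws NacosException {
        Properties props = new Properties();
        props.put("serverAddr", "127.0.0.1:8848"); // the Nacos server started earlier

        ConfigService configService = NacosFactory.createConfigService(props);

        // publishes one line of nacos-config.txt; in practice you would loop over the whole file
        boolean ok = configService.publishConfig(
                "service.vgroup_mapping.my_test_tx_group", // dataId (example group name)
                "SEATA_GROUP",                             // group, matching the script above
                "default");                                // content
        System.out.println("published: " + ok);
    }
}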

3. Our preparation is now complete. Go to the Seata server's bin directory and start the server; if it looks like the screenshot, it is up!

-

(screenshot: 20191202210112)

-

Testing it out

-

1. First, update the pom dependencies of the springboot-dubbo-mybatsiplus-seata project and remove the ZooKeeper-related ones, since we now use Nacos as the registry:

-
	<properties>
-		<webVersion>3.1</webVersion>
-		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-		<maven.compiler.source>1.8</maven.compiler.source>
-		<maven.compiler.target>1.8</maven.compiler.target>
-		<HikariCP.version>3.2.0</HikariCP.version>
-		<mybatis-plus-boot-starter.version>3.2.0</mybatis-plus-boot-starter.version>
-	</properties>
-	<parent>
-		<groupId>org.springframework.boot</groupId>
-		<artifactId>spring-boot-starter-parent</artifactId>
-		<version>2.1.8.RELEASE</version>
-	</parent>
-	<dependencies>
-		<dependency>
-			<groupId>com.alibaba.nacos</groupId>
-			<artifactId>nacos-client</artifactId>
-			<version>1.1.4</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.dubbo</groupId>
-			<artifactId>dubbo-registry-nacos</artifactId>
-			<version>2.7.4.1</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.dubbo</groupId>
-			<artifactId>dubbo-spring-boot-starter</artifactId>
-			<version>2.7.4.1</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.commons</groupId>
-			<artifactId>commons-lang3</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>com.alibaba</groupId>
-			<artifactId>fastjson</artifactId>
-			<version>1.2.60</version>
-		</dependency>
-		<!-- <dependency> <groupId>javax</groupId> <artifactId>javaee-api</artifactId> 
-			<version>7.0</version> <scope>provided</scope> </dependency> -->
-		<dependency>
-			<groupId>io.springfox</groupId>
-			<artifactId>springfox-swagger2</artifactId>
-			<version>2.9.2</version>
-		</dependency>
-		<dependency>
-			<groupId>io.springfox</groupId>
-			<artifactId>springfox-swagger-ui</artifactId>
-			<version>2.9.2</version>
-		</dependency>
- 
-		<!-- mybatis-plus begin -->
-		<dependency>
-			<groupId>com.baomidou</groupId>
-			<artifactId>mybatis-plus-boot-starter</artifactId>
-			<version>${mybatis-plus-boot-starter.version}</version>
-		</dependency>
-		<!-- mybatis-plus end -->
-		<!-- https://mvnrepository.com/artifact/org.projectlombok/lombok -->
-		<dependency>
-			<groupId>org.projectlombok</groupId>
-			<artifactId>lombok</artifactId>
-			<scope>provided</scope>
-		</dependency>
-		<dependency>
-			<groupId>io.seata</groupId>
-			<artifactId>seata-all</artifactId>
-			<version>0.9.0.1</version>
-		</dependency>
-		<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>dynamic-datasource-spring-boot-starter</artifactId> 
-			<version>2.5.4</version> </dependency> -->
- 
-		<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-generator</artifactId> 
-			<version>3.1.0</version> </dependency> -->
-		<!-- https://mvnrepository.com/artifact/org.freemarker/freemarker -->
-		<dependency>
-			<groupId>org.freemarker</groupId>
-			<artifactId>freemarker</artifactId>
-		</dependency>
-		<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter -->
-		<dependency>
-			<groupId>com.alibaba</groupId>
-			<artifactId>druid-spring-boot-starter</artifactId>
-			<version>1.1.20</version>
-		</dependency>
-		<!-- 加上这个才能辨认到log4j2.yml文件 -->
-		<dependency>
-			<groupId>com.fasterxml.jackson.dataformat</groupId>
-			<artifactId>jackson-dataformat-yaml</artifactId>
-		</dependency>
-		<dependency> <!-- 引入log4j2依赖 -->
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-log4j2</artifactId>
-		</dependency>
-		<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
-		<dependency>
-			<groupId>mysql</groupId>
-			<artifactId>mysql-connector-java</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-web</artifactId>
-			<exclusions>
-				<exclusion>
-					<groupId>org.springframework.boot</groupId>
-					<artifactId>spring-boot-starter-logging</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.slf4j</groupId>
-					<artifactId>slf4j-log4j12</artifactId>
-				</exclusion>
-			</exclusions>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-aop</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-test</artifactId>
-			<scope>test</scope>
-		</dependency>
-		<!-- <dependency> <groupId>org.scala-lang</groupId> <artifactId>scala-library</artifactId> 
-			<version>2.11.0</version> </dependency> -->
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-configuration-processor</artifactId>
-			<optional>true</optional>
-		</dependency>
-	</dependencies>
-
-
-

2. Then adjust the directory structure of test-service: delete the ZooKeeper configuration and update the application.yml. Directory structure and code:

-
server:
-  port: 38888
-spring:
-  application: 
-      name: test-service
-  datasource:
-    type: com.alibaba.druid.pool.DruidDataSource
-    url: jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC
-    driver-class-name: com.mysql.cj.jdbc.Driver
-    username: root
-    password: 123456
-dubbo:
-  protocol:
-    loadbalance: leastactive
-    threadpool: cached
-  scan:
-    base-packages: org.test.service
-  application:
-    qos-enable: false
-    name: testserver
-  registry:
-    id: my-registry
-    address:  nacos://127.0.0.1:8848
-mybatis-plus:
-  mapper-locations: classpath:/mapper/*Mapper.xml
-  typeAliasesPackage: org.test.entity
-  global-config:
-    db-config:
-      field-strategy: not-empty
-      id-type: auto
-      db-type: mysql
-  configuration:
-    map-underscore-to-camel-case: true
-    cache-enabled: true      
-    auto-mapping-unknown-column-behavior: none
-
-(screenshot: 20191202211833)
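For reference, a provider that the dubbo.scan.base-packages entry above would pick up looks roughly like the sketch below; the interface and implementation are illustrative only, not copied from the demo:

package org.test.service;

// Dubbo's @Service (2.7.x), not Spring's: the bean is exported to the Nacos registry
import org.apache.dubbo.config.annotation.Service;

interface ITestService {
    String echo(String msg);
}

@Service
public class TestServiceImpl implements ITestService {

    @Override
    public String echo(String msg) {
        return "echo: " + msg;
    }
}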

3. Then update registry.conf as well; if your Nacos runs on a different server, change it to that server's IP and port:

-
registry {
-  type = "nacos"
-  file {
-    name = "file.conf"
-  }
-   zk {
-    cluster = "default"
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-    nacos {
-    serverAddr = "localhost"
-    namespace = ""
-    cluster = "default"
-  }
-}
-config {
-  type = "nacos"
-  file {
-    name = "file.conf"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-    nacos {
-    serverAddr = "localhost"
-    namespace = ""
-    cluster = "default"
-  }
-}
-
-

4. Next, run ProvideApplication.

-

(screenshot: 20191202212000)

-

It started successfully. Now let's look at the Seata server's log:

-

(screenshot: 20191202212028)

-

It worked. Now do the same for test-client: change its application.yml in the same way, swapping ZooKeeper for Nacos (not repeated here), and copy the registry.conf from test-service into the client project's resources, overwriting the existing registry.conf.

-

Then we can run ClientApplication:

-

(screenshot: 20191202212114)

-

5. Confirm that the service has been published, then test whether the transaction behaves correctly.

-

(screenshot: 20191202212203)

-

The service was published and consumed successfully. Now let's use Swagger to check that rollback works as expected; open http://127.0.0.1:28888/swagger-ui.html

-

(screenshot: 20191202212240)
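In case it is unclear what "testing rollback" means here: the idea is to call an endpoint that fails on purpose inside a global transaction and then verify that the provider's insert disappears again. A sketch of such an endpoint; the controller, interface and path are illustrative, not the demo's actual code:

import io.seata.spring.annotation.GlobalTransactional;
import org.apache.dubbo.config.annotation.Reference;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class RollbackTestController {

    // hypothetical Dubbo interface exposed by test-service
    public interface ITestService {
        void insertRecord(String value);
    }

    @Reference // resolved through the Nacos registry
    private ITestService testService;

    @GetMapping("/testRollback")
    @GlobalTransactional
    public String testRollback() {
        testService.insertRecord("will-be-rolled-back"); // branch transaction on the provider
        // simulated business failure: the whole global transaction, including the remote insert, rolls back
        throw new RuntimeException("trigger rollback");
    }
}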

-

Congratulations! If you have made it this far, it worked for you just as it did for me.

-

Summary

-

That completes the basic setup of Nacos with Seata. For more details, please visit the sites below and read the full documentation:

-

Nacos website

-

Dubbo website

-

Seata website

-
- - - - - - - diff --git a/zh-cn/blog/seata-nacos-analysis.json b/zh-cn/blog/seata-nacos-analysis.json deleted file mode 100644 index 86fe9c0f..00000000 --- a/zh-cn/blog/seata-nacos-analysis.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-nacos-analysis.md", - "__html": "

Seata分布式事务启用Nacos做配置中心

\n

项目地址

\n

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

\n

前言

\n

上次发布了直连方式的seata配置,详细可以看这篇博客

\n

我们接着上一篇的基础上去配置nacos做配置中心跟dubbo注册中心.

\n

准备工作

\n

​\t1.首先去nacos的github上下载最新版本

\n

​\t\"\"

\n

​\t2.下载好了后,很简单,解压后到bin目录下去启动就好了,看到如图所示就成了:

\n

\"\"

\n

​\t3.启动完毕后访问:http://127.0.0.1:8848/nacos/#/login

\n

\"\"

\n

是不是看到这样的界面了?输入nacos(账号密码相同),先进去看看吧.

\n

这时候可以发现没有任何服务注册

\n

\"20191202204147\"

\n

别急我们马上让seata服务连接进来.

\n

Seata配置

\n

​\t1.进入seata的conf文件夹看到这个木有?

\n

\"\"

\n

就是它,编辑它:

\n

\"20191202204353\"

\n

\"20191202204437\"

\n

​\t2.然后记得保存哦!接着我们把registry.conf文件打开编辑:

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "nacos"\n\n  nacos {\n    serverAddr = "localhost"\n    namespace = ""\n    cluster = "default"\n  }\n  eureka {\n    serviceUrl = "http://localhost:8761/eureka"\n    application = "default"\n    weight = "1"\n  }\n  redis {\n    serverAddr = "localhost:6379"\n    db = "0"\n  }\n  zk {\n    cluster = "default"\n    serverAddr = "127.0.0.1:2181"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  consul {\n    cluster = "default"\n    serverAddr = "127.0.0.1:8500"\n  }\n  etcd3 {\n    cluster = "default"\n    serverAddr = "http://localhost:2379"\n  }\n  sofa {\n    serverAddr = "127.0.0.1:9603"\n    application = "default"\n    region = "DEFAULT_ZONE"\n    datacenter = "DefaultDataCenter"\n    cluster = "default"\n    group = "SEATA_GROUP"\n    addressWaitTime = "3000"\n  }\n  file {\n    name = "file.conf"\n  }\n}\n\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "nacos"\n\n  nacos {\n    serverAddr = "localhost"\n    namespace = ""\n  }\n  consul {\n    serverAddr = "127.0.0.1:8500"\n  }\n  apollo {\n    app.id = "seata-server"\n    apollo.meta = "http://192.168.1.204:8801"\n  }\n  zk {\n    serverAddr = "127.0.0.1:2181"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  etcd3 {\n    serverAddr = "http://localhost:2379"\n  }\n  file {\n    name = "file.conf"\n  }\n}\n\n
\n

都编辑好了后,我们运行nacos-config.sh,这时候我们配置的nacos-config.txt的内容已经被发送到nacos中了详细如图:

\n

\"20191202205743\"

\n

出现以上类似的代码就是说明成功了,接着我们登录nacos配置中心,查看配置列表,出现如图列表说明配置成功了:

\n

\"20191202205912\"

\n

看到了吧,你的配置已经全部都提交上去了,如果再git工具内运行sh不行的话,试着把编辑sh文件,试试改成如下操作

\n
for line in $(cat nacos-config.txt)\n\ndo\n\nkey=${line%%=*}\nvalue=${line#*=}\necho \"\\r\\n set \"${key}\" = \"${value}\n\nresult=`curl -X POST \"http://127.0.0.1:8848/nacos/v1/cs/configs?dataId=$key&group=SEATA_GROUP&content=$value\"`\n\nif [ \"$result\"x == \"true\"x ]; then\n\n  echo \"\\033[42;37m $result \\033[0m\"\n\nelse\n\n  echo \"\\033[41;37 $result \\033[0m\"\n  let error++\n\nfi\n\ndone\n\n\nif [ $error -eq 0 ]; then\n\necho  \"\\r\\n\\033[42;37m init nacos config finished, please start seata-server. \\033[0m\"\n\nelse\n\necho  \"\\r\\n\\033[41;33m init nacos config fail. \\033[0m\"\n\nfi\n
\n

​\t3.目前我们的准备工作全部完成,我们去seata-service/bin去运行seata服务吧,如图所示就成功啦!

\n

\"20191202210112\"

\n

进行调试

\n

​\t1.首先把springboot-dubbo-mybatsiplus-seata项目的pom的依赖更改,去除掉zk这些配置,因为我们使用nacos做注册中心了.

\n
\t<properties>\n\t\t<webVersion>3.1</webVersion>\n\t\t<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n\t\t<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>\n\t\t<maven.compiler.source>1.8</maven.compiler.source>\n\t\t<maven.compiler.target>1.8</maven.compiler.target>\n\t\t<HikariCP.version>3.2.0</HikariCP.version>\n\t\t<mybatis-plus-boot-starter.version>3.2.0</mybatis-plus-boot-starter.version>\n\t</properties>\n\t<parent>\n\t\t<groupId>org.springframework.boot</groupId>\n\t\t<artifactId>spring-boot-starter-parent</artifactId>\n\t\t<version>2.1.8.RELEASE</version>\n\t</parent>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>com.alibaba.nacos</groupId>\n\t\t\t<artifactId>nacos-client</artifactId>\n\t\t\t<version>1.1.4</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.dubbo</groupId>\n\t\t\t<artifactId>dubbo-registry-nacos</artifactId>\n\t\t\t<version>2.7.4.1</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.dubbo</groupId>\n\t\t\t<artifactId>dubbo-spring-boot-starter</artifactId>\n\t\t\t<version>2.7.4.1</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.commons</groupId>\n\t\t\t<artifactId>commons-lang3</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>com.alibaba</groupId>\n\t\t\t<artifactId>fastjson</artifactId>\n\t\t\t<version>1.2.60</version>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>javax</groupId> <artifactId>javaee-api</artifactId> \n\t\t\t<version>7.0</version> <scope>provided</scope> </dependency> -->\n\t\t<dependency>\n\t\t\t<groupId>io.springfox</groupId>\n\t\t\t<artifactId>springfox-swagger2</artifactId>\n\t\t\t<version>2.9.2</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>io.springfox</groupId>\n\t\t\t<artifactId>springfox-swagger-ui</artifactId>\n\t\t\t<version>2.9.2</version>\n\t\t</dependency>\n \n\t\t<!-- mybatis-plus begin -->\n\t\t<dependency>\n\t\t\t<groupId>com.baomidou</groupId>\n\t\t\t<artifactId>mybatis-plus-boot-starter</artifactId>\n\t\t\t<version>${mybatis-plus-boot-starter.version}</version>\n\t\t</dependency>\n\t\t<!-- mybatis-plus end -->\n\t\t<!-- https://mvnrepository.com/artifact/org.projectlombok/lombok -->\n\t\t<dependency>\n\t\t\t<groupId>org.projectlombok</groupId>\n\t\t\t<artifactId>lombok</artifactId>\n\t\t\t<scope>provided</scope>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>io.seata</groupId>\n\t\t\t<artifactId>seata-all</artifactId>\n\t\t\t<version>0.9.0.1</version>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>dynamic-datasource-spring-boot-starter</artifactId> \n\t\t\t<version>2.5.4</version> </dependency> -->\n \n\t\t<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-generator</artifactId> \n\t\t\t<version>3.1.0</version> </dependency> -->\n\t\t<!-- https://mvnrepository.com/artifact/org.freemarker/freemarker -->\n\t\t<dependency>\n\t\t\t<groupId>org.freemarker</groupId>\n\t\t\t<artifactId>freemarker</artifactId>\n\t\t</dependency>\n\t\t<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter -->\n\t\t<dependency>\n\t\t\t<groupId>com.alibaba</groupId>\n\t\t\t<artifactId>druid-spring-boot-starter</artifactId>\n\t\t\t<version>1.1.20</version>\n\t\t</dependency>\n\t\t<!-- 加上这个才能辨认到log4j2.yml文件 -->\n\t\t<dependency>\n\t\t\t<groupId>com.fasterxml.jackson.dataformat</groupId>\n\t\t\t<artifactId>jackson-dataformat-yaml</artifactId>\n\t\t</dependency>\n\t\t<dependency> <!-- 引入log4j2依赖 
-->\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-log4j2</artifactId>\n\t\t</dependency>\n\t\t<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->\n\t\t<dependency>\n\t\t\t<groupId>mysql</groupId>\n\t\t\t<artifactId>mysql-connector-java</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-web</artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-logging</artifactId>\n\t\t\t\t</exclusion>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.slf4j</groupId>\n\t\t\t\t\t<artifactId>slf4j-log4j12</artifactId>\n\t\t\t\t</exclusion>\n\t\t\t</exclusions>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-aop</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-test</artifactId>\n\t\t\t<scope>test</scope>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>org.scala-lang</groupId> <artifactId>scala-library</artifactId> \n\t\t\t<version>2.11.0</version> </dependency> -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-configuration-processor</artifactId>\n\t\t\t<optional>true</optional>\n\t\t</dependency>\n\t</dependencies>\n\n
\n

​\t2.然后更改test-service的目录结构,删除zk的配置并更改application.yml文件,目录结构与代码:

\n
server:\n  port: 38888\nspring:\n  application: \n      name: test-service\n  datasource:\n    type: com.alibaba.druid.pool.DruidDataSource\n    url: jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC\n    driver-class-name: com.mysql.cj.jdbc.Driver\n    username: root\n    password: 123456\ndubbo:\n  protocol:\n    loadbalance: leastactive\n    threadpool: cached\n  scan:\n    base-packages: org。test.service\n  application:\n    qos-enable: false\n    name: testserver\n  registry:\n    id: my-registry\n    address:  nacos://127.0.0.1:8848\nmybatis-plus:\n  mapper-locations: classpath:/mapper/*Mapper.xml\n  typeAliasesPackage: org.test.entity\n  global-config:\n    db-config:\n      field-strategy: not-empty\n      id-type: auto\n      db-type: mysql\n  configuration:\n    map-underscore-to-camel-case: true\n    cache-enabled: true      \n    auto-mapping-unknown-column-behavior: none\n
\n\"20191202211833\"\n

​\t3.再更改registry.conf文件,如果你的nacos是其它服务器,请改成对应都ip跟端口

\n
registry {\n  type = \"nacos\"\n  file {\n    name = \"file.conf\"\n  }\n   zk {\n    cluster = \"default\"\n    serverAddr = \"127.0.0.1:2181\"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n    nacos {\n    serverAddr = \"localhost\"\n    namespace = \"\"\n    cluster = \"default\"\n  }\n}\nconfig {\n  type = \"nacos\"\n  file {\n    name = \"file.conf\"\n  }\n  zk {\n    serverAddr = \"127.0.0.1:2181\"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n    nacos {\n    serverAddr = \"localhost\"\n    namespace = \"\"\n    cluster = \"default\"\n  }\n}\n
\n

​\t4.接着我们运行provideApplication

\n

\"20191202212000\"

\n

启动成功啦,我们再去看seata的日志:

\n

\"20191202212028\"

\n

成功了,这下我们一样,去修改test-client的内容,首先一样application.yml,把zk换成nacos,这里就不详细描述了,把test-service内的registry.conf,复制到client项目的resources中覆盖原来的registry.conf.

\n

然后我们可以运行clientApplication:

\n

\"20191202212114\"

\n

​\t5.确认服务已经被发布并测试事务运行是否正常

\n

\"20191202212203\"

\n

服务成功发布出来,也被成功消费了.这下我们再去swagger中去测试回滚是否一切正常,访问http://127.0.0.1:28888/swagger-ui.html

\n

\"20191202212240\"

\n

恭喜你,看到这一定跟我一样成功了!

\n

总结

\n

关于nacos的使用跟seata的简单搭建已经完成了,更详细的内容希望希望大家访问以下地址阅读详细文档

\n

nacos官网

\n

dubbo官网

\n

seata官网

\n", - "link": "/zh-cn/blog/seata-nacos-analysis.html", - "meta": { - "title": "Seata分布式事务启用Nacos做配置中心", - "keywords": "Seata,Nacos,分布式事务", - "description": "本文讲述如何使用Seata整合Nacos配置", - "author": "FUNKYE", - "date": "2019/12/02" - } -} \ No newline at end of file diff --git a/zh-cn/blog/seata-nacos-docker.html b/zh-cn/blog/seata-nacos-docker.html deleted file mode 100644 index 8c339f62..00000000 --- a/zh-cn/blog/seata-nacos-docker.html +++ /dev/null @@ -1,599 +0,0 @@ - - - - - - - - - - Docker部署Seata与Nacos整合 - - - - -

Deploying Seata and Nacos Together with Docker

-

Demo project used in this post

-

Author: FUNKYE (Chen Jianbin), principal engineer at an internet company in Hangzhou.

-

Preface

-

Blog post on configuring Seata with direct connections

-

Blog post on integrating Seata with Nacos

-

Building on the previous posts, we now configure Nacos as the configuration center and the Dubbo registry, this time running everything in Docker.

-

Preparation

-

1. Install Docker:

-
yum -y install docker
-
-

2. Create the Nacos and Seata databases:

-
/******************************************/
-/*   数据库全名 = nacos   */
-/*   表名称 = config_info   */
-/******************************************/
-CREATE TABLE `config_info` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
-  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
-  `group_id` varchar(255) DEFAULT NULL,
-  `content` longtext NOT NULL COMMENT 'content',
-  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',
-  `src_user` text COMMENT 'source user',
-  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',
-  `app_name` varchar(128) DEFAULT NULL,
-  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
-  `c_desc` varchar(256) DEFAULT NULL,
-  `c_use` varchar(64) DEFAULT NULL,
-  `effect` varchar(64) DEFAULT NULL,
-  `type` varchar(64) DEFAULT NULL,
-  `c_schema` text,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = config_info_aggr   */
-/******************************************/
-CREATE TABLE `config_info_aggr` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
-  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
-  `group_id` varchar(255) NOT NULL COMMENT 'group_id',
-  `datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
-  `content` longtext NOT NULL COMMENT '内容',
-  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
-  `app_name` varchar(128) DEFAULT NULL,
-  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';
-
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = config_info_beta   */
-/******************************************/
-CREATE TABLE `config_info_beta` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
-  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
-  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
-  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
-  `content` longtext NOT NULL COMMENT 'content',
-  `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
-  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',
-  `src_user` text COMMENT 'source user',
-  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',
-  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = config_info_tag   */
-/******************************************/
-CREATE TABLE `config_info_tag` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
-  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
-  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
-  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
-  `tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
-  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
-  `content` longtext NOT NULL COMMENT 'content',
-  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',
-  `src_user` text COMMENT 'source user',
-  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = config_tags_relation   */
-/******************************************/
-CREATE TABLE `config_tags_relation` (
-  `id` bigint(20) NOT NULL COMMENT 'id',
-  `tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
-  `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
-  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
-  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
-  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
-  `nid` bigint(20) NOT NULL AUTO_INCREMENT,
-  PRIMARY KEY (`nid`),
-  UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
-  KEY `idx_tenant_id` (`tenant_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = group_capacity   */
-/******************************************/
-CREATE TABLE `group_capacity` (
-  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
-  `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群',
-  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
-  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
-  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
-  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值',
-  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
-  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_group_id` (`group_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = his_config_info   */
-/******************************************/
-CREATE TABLE `his_config_info` (
-  `id` bigint(64) unsigned NOT NULL,
-  `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
-  `data_id` varchar(255) NOT NULL,
-  `group_id` varchar(128) NOT NULL,
-  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
-  `content` longtext NOT NULL,
-  `md5` varchar(32) DEFAULT NULL,
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00',
-  `src_user` text,
-  `src_ip` varchar(20) DEFAULT NULL,
-  `op_type` char(10) DEFAULT NULL,
-  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
-  PRIMARY KEY (`nid`),
-  KEY `idx_gmt_create` (`gmt_create`),
-  KEY `idx_gmt_modified` (`gmt_modified`),
-  KEY `idx_did` (`data_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';
-
-
-/******************************************/
-/*   数据库全名 = nacos_config   */
-/*   表名称 = tenant_capacity   */
-/******************************************/
-CREATE TABLE `tenant_capacity` (
-  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
-  `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
-  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
-  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
-  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
-  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
-  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
-  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
-  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',
-  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_tenant_id` (`tenant_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';
-
-
-CREATE TABLE `tenant_info` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
-  `kp` varchar(128) NOT NULL COMMENT 'kp',
-  `tenant_id` varchar(128) default '' COMMENT 'tenant_id',
-  `tenant_name` varchar(128) default '' COMMENT 'tenant_name',
-  `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
-  `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
-  `gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
-  `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
-  KEY `idx_tenant_id` (`tenant_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
-
-CREATE TABLE users (
-	username varchar(50) NOT NULL PRIMARY KEY,
-	password varchar(500) NOT NULL,
-	enabled boolean NOT NULL
-);
-
-CREATE TABLE roles (
-	username varchar(50) NOT NULL,
-	role varchar(50) NOT NULL
-);
-
-INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
-
-INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
-
-
-
-- the table to store GlobalSession data
-drop table if exists `global_table`;
-create table `global_table` (
-  `xid` varchar(128)  not null,
-  `transaction_id` bigint,
-  `status` tinyint not null,
-  `application_id` varchar(32),
-  `transaction_service_group` varchar(32),
-  `transaction_name` varchar(128),
-  `timeout` int,
-  `begin_time` bigint,
-  `application_data` varchar(2000),
-  `gmt_create` datetime,
-  `gmt_modified` datetime,
-  primary key (`xid`),
-  key `idx_gmt_modified_status` (`gmt_modified`, `status`),
-  key `idx_transaction_id` (`transaction_id`)
-);
-
--- the table to store BranchSession data
-drop table if exists `branch_table`;
-create table `branch_table` (
-  `branch_id` bigint not null,
-  `xid` varchar(128) not null,
-  `transaction_id` bigint ,
-  `resource_group_id` varchar(32),
-  `resource_id` varchar(256) ,
-  `lock_key` varchar(128) ,
-  `branch_type` varchar(8) ,
-  `status` tinyint,
-  `client_id` varchar(64),
-  `application_data` varchar(2000),
-  `gmt_create` datetime,
-  `gmt_modified` datetime,
-  primary key (`branch_id`),
-  key `idx_xid` (`xid`)
-);
-
--- the table to store lock data
-drop table if exists `lock_table`;
-create table `lock_table` (
-  `row_key` varchar(128) not null,
-  `xid` varchar(96),
-  `transaction_id` long ,
-  `branch_id` long,
-  `resource_id` varchar(256) ,
-  `table_name` varchar(32) ,
-  `pk` varchar(36) ,
-  `gmt_create` datetime ,
-  `gmt_modified` datetime,
-  primary key(`row_key`)
-);
-
-
-

3. Pull the Nacos and Seata images and run them:

-
docker run -d --name nacos -p 8848:8848 -e MODE=standalone -e MYSQL_MASTER_SERVICE_HOST=<your-mysql-host> -e MYSQL_MASTER_SERVICE_DB_NAME=nacos -e MYSQL_MASTER_SERVICE_USER=root -e MYSQL_MASTER_SERVICE_PASSWORD=<your-mysql-password> -e MYSQL_SLAVE_SERVICE_HOST=<your-mysql-host> -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_DATABASE_NUM=1 nacos/nacos-server:latest
-
-
docker run -d --name seata -p 8091:8091 -e SEATA_IP=<ip-to-advertise> -e SEATA_PORT=8091 seataio/seata-server:latest
-
-

Seata configuration

-

1. The Seata container has no vim built in, so simply cp the resources folder out to the host, edit it there, and then cp it back:

-
docker cp <seata-container-id>:seata-server/resources <directory-on-the-host>
-
-

2. Use the following command to get each container's IP address:

-
docker inspect --format='{{.NetworkSettings.IPAddress}}' ID/NAMES
-
-

3. Edit nacos-config.txt to the following content:

-
transport.type=TCP
-transport.server=NIO
-transport.heartbeat=true
-transport.thread-factory.boss-thread-prefix=NettyBoss
-transport.thread-factory.worker-thread-prefix=NettyServerNIOWorker
-transport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler
-transport.thread-factory.share-boss-worker=false
-transport.thread-factory.client-selector-thread-prefix=NettyClientSelector
-transport.thread-factory.client-selector-thread-size=1
-transport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread
-transport.thread-factory.boss-thread-size=1
-transport.thread-factory.worker-thread-size=8
-transport.shutdown.wait=3
-service.vgroup_mapping.<your-tx-service-group>=default
-service.enableDegrade=false
-service.disable=false
-client.rm.async.commit.buffer.limit=10000
-client.rm.lock.retry.internal=10
-client.rm.lock.retry.times=30
-client.rm.report.retry.count=5
-client.rm.lock.retry.policy.branch-rollback-on-conflict=true
-client.rm.table.meta.check.enable=true
-client.rm.report.success.enable=true
-client.tm.commit.retry.count=5
-client.tm.rollback.retry.count=5
-store.mode=db
-store.file.dir=file_store/data
-store.file.max-branch-session-size=16384
-store.file.max-global-session-size=512
-store.file.file-write-buffer-cache-size=16384
-store.file.flush-disk-mode=async
-store.file.session.reload.read_size=100
-store.db.datasource=dbcp
-store.db.db-type=mysql
-store.db.driver-class-name=com.mysql.jdbc.Driver
-store.db.url=jdbc:mysql://<your-mysql-host>:3306/seata?useUnicode=true
-store.db.user=<your-mysql-user>
-store.db.password=<your-mysql-password>
-store.db.min-conn=1
-store.db.max-conn=3
-store.db.global.table=global_table
-store.db.branch.table=branch_table
-store.db.query-limit=100
-store.db.lock-table=lock_table
-server.recovery.committing-retry-period=1000
-server.recovery.asyn-committing-retry-period=1000
-server.recovery.rollbacking-retry-period=1000
-server.recovery.timeout-retry-period=1000
-server.max.commit.retry.timeout=-1
-server.max.rollback.retry.timeout=-1
-client.undo.data.validation=true
-client.undo.log.serialization=jackson
-server.undo.log.save.days=7
-server.undo.log.delete.period=86400000
-client.undo.log.table=undo_log
-transport.serialization=seata
-transport.compressor=none
-metrics.enabled=false
-metrics.registry-type=compact
-metrics.exporter-list=prometheus
-metrics.exporter-prometheus-port=9898
-client.support.spring.datasource.autoproxy=false
-
-

Click here for the detailed parameter reference.
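One entry above deserves special attention: the service.vgroup_mapping.<your-tx-service-group> key must match the transaction service group that the application passes to GlobalTransactionScanner, otherwise the client cannot resolve a Seata server cluster. A minimal sketch of that relationship, using my_test_tx_group purely as an example name:

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import io.seata.spring.annotation.GlobalTransactionScanner;

@Configuration
public class SeataTxGroupConfig {

    /**
     * The second constructor argument is the transaction service group. With the Nacos entry
     * "service.vgroup_mapping.my_test_tx_group=default", this group resolves to the "default"
     * Seata server cluster registered above.
     */
    @Bean
    public GlobalTransactionScanner globalTransactionScanner() {
        return new GlobalTransactionScanner("test-service", "my_test_tx_group");
    }
}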

-

4. Edit registry.conf to the following content:

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "nacos"
-
-  nacos {
-    serverAddr = "<nacos-container-ip>:8848"
-    namespace = ""
-    cluster = "default"
-  }
-  eureka {
-    serviceUrl = "http://localhost:8761/eureka"
-    application = "default"
-    weight = "1"
-  }
-  redis {
-    serverAddr = "localhost:6379"
-    db = "0"
-  }
-  zk {
-    cluster = "default"
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  consul {
-    cluster = "default"
-    serverAddr = "127.0.0.1:8500"
-  }
-  etcd3 {
-    cluster = "default"
-    serverAddr = "http://localhost:2379"
-  }
-  sofa {
-    serverAddr = "127.0.0.1:9603"
-    application = "default"
-    region = "DEFAULT_ZONE"
-    datacenter = "DefaultDataCenter"
-    cluster = "default"
-    group = "SEATA_GROUP"
-    addressWaitTime = "3000"
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "nacos"
-
-  nacos {
-    serverAddr = "<nacos-container-ip>:8848"
-    namespace = ""
-  }
-  consul {
-    serverAddr = "127.0.0.1:8500"
-  }
-  apollo {
-    app.id = "seata-server"
-    apollo.meta = "http://192.168.1.204:8801"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  etcd3 {
-    serverAddr = "http://localhost:2379"
-  }
-  file {
-    name = "file.conf"
-  }
-}
-
-

5. Once the configuration is done, use the following commands to copy the modified registry.conf back into the container, restart it, and watch the logs:

-
docker cp /home/seata/resources/registry.conf seata:seata-server/resources/
-docker restart seata
-docker logs -f seata
-
-

6. Modify nacos-config.sh:

-
for line in $(cat nacos-config.txt)
-
-do
-
-key=${line%%=*}
-value=${line#*=}
-echo "\r\n set "${key}" = "${value}
-
-result=`curl -X POST "http://<nacos-container-ip>:8848/nacos/v1/cs/configs?dataId=$key&group=SEATA_GROUP&content=$value"`
-
-if [ "$result"x == "true"x ]; then
-
-  echo "\033[42;37m $result \033[0m"
-
-else
-
-  echo "\033[41;37 $result \033[0m"
-  let error++
-
-fi
-
-done
-
-
-if [ $error -eq 0 ]; then
-
-echo  "\r\n\033[42;37m init nacos config finished, please start seata-server. \033[0m"
-
-else
-
-echo  "\r\n\033[41;33m init nacos config fail. \033[0m"
-
-fi
-
-

​ 7.运行nacos-config.sh将配置上传到nacos中,登录nacos控制中心查看
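下面给出一个最简的执行示意:假设 nacos-config.sh 与 nacos-config.txt 已放在同一目录(比如前面用到的 /home/seata/resources),且脚本里的 nacos 地址已经改成你的 nacos 容器 ip,路径仅为示例:

cd /home/seata/resources    # 进入存放脚本的目录(示例路径,按实际调整)
sh nacos-config.sh          # 脚本会逐行读取 nacos-config.txt 并 POST 到 nacos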

-

20191202205912

-

​ 如图所示便表示配置上传成功了。
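除了看控制台,也可以用 nacos 的配置查询 openAPI 抽查某个 key 是否已经写入(下面的 dataId 取自上面 nacos-config.txt 里的一项,地址请换成你的 nacos 容器 ip,仅作示意):

# 查询单个配置项,能返回对应的 value(例如 db)即表示上传成功
curl -X GET "http://nacos容器ip:8848/nacos/v1/cs/configs?dataId=store.mode&group=SEATA_GROUP"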

-

进行调试

-

​ 1.拉取博文中所示的项目,修改test-service的application.yml与registry.conf

-
registry {
-  type = "nacos"
-  file {
-    name = "file.conf"
-  }
-  nacos {
-    serverAddr = "宿主机ip:8848"
-    namespace = ""
-    cluster = "default"
-  }
-}
-config {
-  type = "nacos"
-  file {
-    name = "file.conf"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-  nacos {
-    serverAddr = "宿主机ip:8848"
-    namespace = ""
-    cluster = "default"
-  }
-}
-
-
-
server:
-  port: 38888
-spring:
-  application: 
-      name: test-service
-  datasource:
-    type: com.alibaba.druid.pool.DruidDataSource
-    url: jdbc:mysql://mysqlip:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC
-    driver-class-name: com.mysql.cj.jdbc.Driver
-    username: root
-    password: 123456
-dubbo:
-  protocol:
-    threadpool: cached
-  scan:
-    base-packages: com.example
-  application:
-    qos-enable: false
-    name: testserver
-  registry:
-    id: my-registry
-    address:  nacos://宿主机ip:8848
-mybatis-plus:
-  mapper-locations: classpath:/mapper/*Mapper.xml
-  typeAliasesPackage: org.test.entity
-  global-config:
-    db-config:
-      field-strategy: not-empty
-      id-type: auto
-      db-type: mysql
-  configuration:
-    map-underscore-to-camel-case: true
-    cache-enabled: true
-    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
-    auto-mapping-unknown-column-behavior: none
-
-

​ 2.把修改完成的registry.conf复制到test-client的resources目录中,并修改application.yml

-
spring:
-  application:
-     name: test
-  datasource:
-     driver-class-name: com.mysql.cj.jdbc.Driver
-     url: jdbc:mysql://mysqlIp:3306/test?userSSL=true&useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai
-     username: root
-     password: 123456
-  mvc:
-    servlet:
-      load-on-startup: 1
-  http:
-    encoding:
-            force: true
-            charset: utf-8
-            enabled: true
-    multipart:
-      max-file-size: 10MB
-      max-request-size: 10MB
-dubbo:
-  registry:
-    id: my-registry
-    address:  nacos://宿主机ip:8848
-  application:
-    name: dubbo-demo-client
-    qos-enable: false
-server:
-  port: 28888
-  max-http-header-size: 8192
-  address: 0.0.0.0
-  tomcat:
-    max-http-post-size: 104857600
-
-

​ 4.依次运行test-service,test-client.

-

​ 5.查看nacos中服务列表是否如下图所示

-

20191203132351
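如果不方便打开控制台,也可以用 nacos 的服务列表 openAPI 粗略确认 dubbo 服务已经注册上来(地址换成宿主机 ip,接口与参数以你所用的 nacos 版本为准,仅作示意):

# 分页查询已注册的服务名,正常能看到 dubbo 的 provider/consumer 相关服务
curl -X GET "http://宿主机ip:8848/nacos/v1/ns/service/list?pageNo=1&pageSize=20"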

-

总结

-

关于nacos与seata的docker部署已经完成了,更详细的内容希望大家访问以下地址阅读详细文档:

-

nacos官网

-

dubbo官网

-

seata官网

-

docker官网

-
- - - - - - - diff --git a/zh-cn/blog/seata-nacos-docker.json b/zh-cn/blog/seata-nacos-docker.json deleted file mode 100644 index b1e8e0d9..00000000 --- a/zh-cn/blog/seata-nacos-docker.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "seata-nacos-docker.md", - "__html": "

Docker部署Seata与Nacos整合

\n

运行所使用的demo项目地址

\n

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

\n

前言

\n

直连方式的Seata配置博客

\n

Seata整合Nacos配置博客

\n

我们接着前几篇篇的基础上去配置nacos做配置中心跟dubbo注册中心.

\n

准备工作

\n

​\t1.安装docker

\n
yum -y install docker\n
\n

​\t2.创建nacos与seata的数据库

\n
/******************************************/\n/*   数据库全名 = nacos   */\n/*   表名称 = config_info   */\n/******************************************/\nCREATE TABLE `config_info` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',\n  `data_id` varchar(255) NOT NULL COMMENT 'data_id',\n  `group_id` varchar(255) DEFAULT NULL,\n  `content` longtext NOT NULL COMMENT 'content',\n  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',\n  `src_user` text COMMENT 'source user',\n  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',\n  `app_name` varchar(128) DEFAULT NULL,\n  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',\n  `c_desc` varchar(256) DEFAULT NULL,\n  `c_use` varchar(64) DEFAULT NULL,\n  `effect` varchar(64) DEFAULT NULL,\n  `type` varchar(64) DEFAULT NULL,\n  `c_schema` text,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = config_info_aggr   */\n/******************************************/\nCREATE TABLE `config_info_aggr` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',\n  `data_id` varchar(255) NOT NULL COMMENT 'data_id',\n  `group_id` varchar(255) NOT NULL COMMENT 'group_id',\n  `datum_id` varchar(255) NOT NULL COMMENT 'datum_id',\n  `content` longtext NOT NULL COMMENT '内容',\n  `gmt_modified` datetime NOT NULL COMMENT '修改时间',\n  `app_name` varchar(128) DEFAULT NULL,\n  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';\n\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = config_info_beta   */\n/******************************************/\nCREATE TABLE `config_info_beta` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',\n  `data_id` varchar(255) NOT NULL COMMENT 'data_id',\n  `group_id` varchar(128) NOT NULL COMMENT 'group_id',\n  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',\n  `content` longtext NOT NULL COMMENT 'content',\n  `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',\n  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',\n  `src_user` text COMMENT 'source user',\n  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',\n  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = config_info_tag   */\n/******************************************/\nCREATE TABLE `config_info_tag` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',\n  `data_id` varchar(255) NOT NULL COMMENT 'data_id',\n  `group_id` varchar(128) NOT NULL COMMENT 'group_id',\n  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',\n  `tag_id` varchar(128) NOT NULL COMMENT 'tag_id',\n  `app_name` varchar(128) DEFAULT NULL COMMENT 
'app_name',\n  `content` longtext NOT NULL COMMENT 'content',\n  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',\n  `src_user` text COMMENT 'source user',\n  `src_ip` varchar(20) DEFAULT NULL COMMENT 'source ip',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = config_tags_relation   */\n/******************************************/\nCREATE TABLE `config_tags_relation` (\n  `id` bigint(20) NOT NULL COMMENT 'id',\n  `tag_name` varchar(128) NOT NULL COMMENT 'tag_name',\n  `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',\n  `data_id` varchar(255) NOT NULL COMMENT 'data_id',\n  `group_id` varchar(128) NOT NULL COMMENT 'group_id',\n  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',\n  `nid` bigint(20) NOT NULL AUTO_INCREMENT,\n  PRIMARY KEY (`nid`),\n  UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),\n  KEY `idx_tenant_id` (`tenant_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = group_capacity   */\n/******************************************/\nCREATE TABLE `group_capacity` (\n  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',\n  `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群',\n  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',\n  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',\n  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',\n  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值',\n  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',\n  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_group_id` (`group_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';\n\n/******************************************/\n/*   数据库全名 = nacos_config   */\n/*   表名称 = his_config_info   */\n/******************************************/\nCREATE TABLE `his_config_info` (\n  `id` bigint(64) unsigned NOT NULL,\n  `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,\n  `data_id` varchar(255) NOT NULL,\n  `group_id` varchar(128) NOT NULL,\n  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',\n  `content` longtext NOT NULL,\n  `md5` varchar(32) DEFAULT NULL,\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00',\n  `src_user` text,\n  `src_ip` varchar(20) DEFAULT NULL,\n  `op_type` char(10) DEFAULT NULL,\n  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',\n  PRIMARY KEY (`nid`),\n  KEY `idx_gmt_create` (`gmt_create`),\n  KEY `idx_gmt_modified` (`gmt_modified`),\n  KEY `idx_did` (`data_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';\n\n\n/******************************************/\n/*   数据库全名 = 
nacos_config   */\n/*   表名称 = tenant_capacity   */\n/******************************************/\nCREATE TABLE `tenant_capacity` (\n  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',\n  `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',\n  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',\n  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',\n  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',\n  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',\n  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',\n  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',\n  `gmt_create` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '创建时间',\n  `gmt_modified` datetime NOT NULL DEFAULT '2010-05-05 00:00:00' COMMENT '修改时间',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_tenant_id` (`tenant_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';\n\n\nCREATE TABLE `tenant_info` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',\n  `kp` varchar(128) NOT NULL COMMENT 'kp',\n  `tenant_id` varchar(128) default '' COMMENT 'tenant_id',\n  `tenant_name` varchar(128) default '' COMMENT 'tenant_name',\n  `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',\n  `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',\n  `gmt_create` bigint(20) NOT NULL COMMENT '创建时间',\n  `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),\n  KEY `idx_tenant_id` (`tenant_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';\n\nCREATE TABLE users (\n\tusername varchar(50) NOT NULL PRIMARY KEY,\n\tpassword varchar(500) NOT NULL,\n\tenabled boolean NOT NULL\n);\n\nCREATE TABLE roles (\n\tusername varchar(50) NOT NULL,\n\trole varchar(50) NOT NULL\n);\n\nINSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);\n\nINSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');\n\n
\n
-- the table to store GlobalSession data\ndrop table if exists `global_table`;\ncreate table `global_table` (\n  `xid` varchar(128)  not null,\n  `transaction_id` bigint,\n  `status` tinyint not null,\n  `application_id` varchar(32),\n  `transaction_service_group` varchar(32),\n  `transaction_name` varchar(128),\n  `timeout` int,\n  `begin_time` bigint,\n  `application_data` varchar(2000),\n  `gmt_create` datetime,\n  `gmt_modified` datetime,\n  primary key (`xid`),\n  key `idx_gmt_modified_status` (`gmt_modified`, `status`),\n  key `idx_transaction_id` (`transaction_id`)\n);\n\n-- the table to store BranchSession data\ndrop table if exists `branch_table`;\ncreate table `branch_table` (\n  `branch_id` bigint not null,\n  `xid` varchar(128) not null,\n  `transaction_id` bigint ,\n  `resource_group_id` varchar(32),\n  `resource_id` varchar(256) ,\n  `lock_key` varchar(128) ,\n  `branch_type` varchar(8) ,\n  `status` tinyint,\n  `client_id` varchar(64),\n  `application_data` varchar(2000),\n  `gmt_create` datetime,\n  `gmt_modified` datetime,\n  primary key (`branch_id`),\n  key `idx_xid` (`xid`)\n);\n\n-- the table to store lock data\ndrop table if exists `lock_table`;\ncreate table `lock_table` (\n  `row_key` varchar(128) not null,\n  `xid` varchar(96),\n  `transaction_id` long ,\n  `branch_id` long,\n  `resource_id` varchar(256) ,\n  `table_name` varchar(32) ,\n  `pk` varchar(36) ,\n  `gmt_create` datetime ,\n  `gmt_modified` datetime,\n  primary key(`row_key`)\n);\n\n
\n

​\t3.拉取nacos以及seata镜像并运行

\n
docker run -d --name nacos -p 8848:8848 -e MODE=standalone -e MYSQL_MASTER_SERVICE_HOST=你的mysql所在ip -e MYSQL_MASTER_SERVICE_DB_NAME=nacos -e MYSQL_MASTER_SERVICE_USER=root -e MYSQL_MASTER_SERVICE_PASSWORD=mysql密码 -e MYSQL_SLAVE_SERVICE_HOST=你的mysql所在ip -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_DATABASE_NUM=1 nacos/nacos-server:latest\n
\n
docker run -d --name seata -p 8091:8091 -e SEATA_IP=你想指定的ip -e SEATA_PORT=8091 seataio/seata-server:latest\n
\n

Seata配置

\n

​\t1.由于seata容器内没有内置vim,我们可以直接将要文件夹cp到宿主机外来编辑好了,再cp回去

\n
docker cp 容器id:seata-server/resources 你想放置的目录\n
\n

​\t2.使用如下代码获取两个容器的ip地址

\n
docker inspect --format='{{.NetworkSettings.IPAddress}}' ID/NAMES\n
\n

​\t3.nacos-config.txt编辑为如下内容

\n
transport.type=TCP\ntransport.server=NIO\ntransport.heartbeat=true\ntransport.thread-factory.boss-thread-prefix=NettyBoss\ntransport.thread-factory.worker-thread-prefix=NettyServerNIOWorker\ntransport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler\ntransport.thread-factory.share-boss-worker=false\ntransport.thread-factory.client-selector-thread-prefix=NettyClientSelector\ntransport.thread-factory.client-selector-thread-size=1\ntransport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread\ntransport.thread-factory.boss-thread-size=1\ntransport.thread-factory.worker-thread-size=8\ntransport.shutdown.wait=3\nservice.vgroup_mapping.你的事务组名=default\nservice.enableDegrade=false\nservice.disable=false\nclient.rm.async.commit.buffer.limit=10000\nclient.rm.lock.retry.internal=10\nclient.rm.lock.retry.times=30\nclient.rm.report.retry.count=5\nclient.rm.lock.retry.policy.branch-rollback-on-conflict=true\nclient.rm.table.meta.check.enable=true\nclient.rm.report.success.enable=true\nclient.tm.commit.retry.count=5\nclient.tm.rollback.retry.count=5\nstore.mode=db\nstore.file.dir=file_store/data\nstore.file.max-branch-session-size=16384\nstore.file.max-global-session-size=512\nstore.file.file-write-buffer-cache-size=16384\nstore.file.flush-disk-mode=async\nstore.file.session.reload.read_size=100\nstore.db.datasource=dbcp\nstore.db.db-type=mysql\nstore.db.driver-class-name=com.mysql.jdbc.Driver\nstore.db.url=jdbc:mysql://你的mysql所在ip:3306/seata?useUnicode=true\nstore.db.user=mysql帐号\nstore.db.password=mysql密码\nstore.db.min-conn=1\nstore.db.max-conn=3\nstore.db.global.table=global_table\nstore.db.branch.table=branch_table\nstore.db.query-limit=100\nstore.db.lock-table=lock_table\nserver.recovery.committing-retry-period=1000\nserver.recovery.asyn-committing-retry-period=1000\nserver.recovery.rollbacking-retry-period=1000\nserver.recovery.timeout-retry-period=1000\nserver.max.commit.retry.timeout=-1\nserver.max.rollback.retry.timeout=-1\nclient.undo.data.validation=true\nclient.undo.log.serialization=jackson\nserver.undo.log.save.days=7\nserver.undo.log.delete.period=86400000\nclient.undo.log.table=undo_log\ntransport.serialization=seata\ntransport.compressor=none\nmetrics.enabled=false\nmetrics.registry-type=compact\nmetrics.exporter-list=prometheus\nmetrics.exporter-prometheus-port=9898\nclient.support.spring.datasource.autoproxy=false\n
\n

详细参数配置请点此处

\n

​\t4.registry.conf编辑为如下内容

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "nacos"\n\n  nacos {\n    serverAddr = "nacos容器ip:8848"\n    namespace = ""\n    cluster = "default"\n  }\n  eureka {\n    serviceUrl = "http://localhost:8761/eureka"\n    application = "default"\n    weight = "1"\n  }\n  redis {\n    serverAddr = "localhost:6379"\n    db = "0"\n  }\n  zk {\n    cluster = "default"\n    serverAddr = "127.0.0.1:2181"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  consul {\n    cluster = "default"\n    serverAddr = "127.0.0.1:8500"\n  }\n  etcd3 {\n    cluster = "default"\n    serverAddr = "http://localhost:2379"\n  }\n  sofa {\n    serverAddr = "127.0.0.1:9603"\n    application = "default"\n    region = "DEFAULT_ZONE"\n    datacenter = "DefaultDataCenter"\n    cluster = "default"\n    group = "SEATA_GROUP"\n    addressWaitTime = "3000"\n  }\n  file {\n    name = "file.conf"\n  }\n}\n\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "nacos"\n\n  nacos {\n    serverAddr = "nacos容器ip:8848"\n    namespace = ""\n  }\n  consul {\n    serverAddr = "127.0.0.1:8500"\n  }\n  apollo {\n    app.id = "seata-server"\n    apollo.meta = "http://192.168.1.204:8801"\n  }\n  zk {\n    serverAddr = "127.0.0.1:2181"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  etcd3 {\n    serverAddr = "http://localhost:2379"\n  }\n  file {\n    name = "file.conf"\n  }\n}\n
\n

​\t5.配置完成后使用如下命令,把修改完成的registry.conf复制到容器中,并重启查看日志运行

\n
docker cp /home/seata/resources/registry.conf seata:seata-server/resources/\ndocker restart seata\ndocker logs -f seata\n
\n

​\t6.修改nacos-config.sh

\n
for line in $(cat nacos-config.txt)\n\ndo\n\nkey=${line%%=*}\nvalue=${line#*=}\necho "\\r\\n set "${key}" = "${value}\n\nresult=`curl -X POST "http://nacos容器的ip:8848/nacos/v1/cs/configs?dataId=$key&group=SEATA_GROUP&content=$value"`\n\nif [ "$result"x == "true"x ]; then\n\n  echo "\\033[42;37m $result \\033[0m"\n\nelse\n\n  echo "\\033[41;37 $result \\033[0m"\n  let error++\n\nfi\n\ndone\n\n\nif [ $error -eq 0 ]; then\n\necho  "\\r\\n\\033[42;37m init nacos config finished, please start seata-server. \\033[0m"\n\nelse\n\necho  "\\r\\n\\033[41;33m init nacos config fail. \\033[0m"\n\nfi\n
\n

​\t7.运行nacos-config.sh将配置上传的nacos中,登录nacos控制中心查看

\n

\"20191202205912\"

\n

​\t如图所示便是成功了.

\n

进行调试

\n

​\t1.拉取博文中所示的项目,修改test-service的application.yml与registry.conf

\n
registry {\n  type = "nacos"\n  file {\n    name = "file.conf"\n  }\n  nacos {\n    serverAddr = "宿主机ip:8848"\n    namespace = ""\n    cluster = "default"\n  }\n}\nconfig {\n  type = "nacos"\n  file {\n    name = "file.conf"\n  }\n  zk {\n    serverAddr = "127.0.0.1:2181"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n  nacos {\n    serverAddr = "宿主机ip:8848"\n    namespace = ""\n    cluster = "default"\n  }\n}\n\n
\n
server:\n  port: 38888\nspring:\n  application: \n      name: test-service\n  datasource:\n    type: com.alibaba.druid.pool.DruidDataSource\n    url: jdbc:mysql://mysqlip:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC\n    driver-class-name: com.mysql.cj.jdbc.Driver\n    username: root\n    password: 123456\ndubbo:\n  protocol:\n    threadpool: cached\n  scan:\n    base-packages: com.example\n  application:\n    qos-enable: false\n    name: testserver\n  registry:\n    id: my-registry\n    address:  nacos://宿主机ip:8848\nmybatis-plus:\n  mapper-locations: classpath:/mapper/*Mapper.xml\n  typeAliasesPackage: org.test.entity\n  global-config:\n    db-config:\n      field-strategy: not-empty\n      id-type: auto\n      db-type: mysql\n  configuration:\n    map-underscore-to-camel-case: true\n    cache-enabled: true\n    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl\n    auto-mapping-unknown-column-behavior: none\n
\n

​\t2.把修改完成的registry.conf复制到test-client-resources中,并修改application

\n
spring:\n  application:\n     name: test\n  datasource:\n     driver-class-name: com.mysql.cj.jdbc.Driver\n     url: jdbc:mysql://mysqlIp:3306/test?userSSL=true&useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai\n     username: root\n     password: 123456\n  mvc:\n    servlet:\n      load-on-startup: 1\n  http:\n    encoding:\n            force: true\n            charset: utf-8\n            enabled: true\n    multipart:\n      max-file-size: 10MB\n      max-request-size: 10MB\ndubbo:\n  registry:\n    id: my-registry\n    address:  nacos://宿主机ip:8848\n  application:\n    name: dubbo-demo-client\n    qos-enable: false\nserver:\n  port: 28888\n  max-http-header-size: 8192\n  address: 0.0.0.0\n  tomcat:\n    max-http-post-size: 104857600\n
\n

​\t4.依次运行test-service,test-client.

\n

​\t5.查看nacos中服务列表是否如下图所示

\n

\"20191203132351\"

\n

总结

\n

关于nacos与seata的docker部署已经完成了,更详细的内容希望希望大家访问以下地址阅读详细文档

\n

nacos官网

\n

dubbo官网

\n

seata官网

\n

docker官网

\n", - "link": "/zh-cn/blog/seata-nacos-docker.html", - "meta": { - "title": "Docker部署Seata与Nacos整合", - "keywords": "Seata,Nacos,分布式事务", - "description": "本文讲述如何使用Seata整合Nacos配置的Docker部署", - "author": "FUNKYE", - "date": "2019/12/03" - } -} \ No newline at end of file diff --git a/zh-cn/blog/springboot-dubbo-mybatisplus-seata.html b/zh-cn/blog/springboot-dubbo-mybatisplus-seata.html deleted file mode 100644 index 56c386e8..00000000 --- a/zh-cn/blog/springboot-dubbo-mybatisplus-seata.html +++ /dev/null @@ -1,1382 +0,0 @@ - - - - - - - - - - SpringBoot+Dubbo+MybatisPlus整合seata分布式事务 - - - - -

SpringBoot+Dubbo+MybatisPlus整合Seata分布式事务

-

项目地址

-

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

-

前言

-

事务:事务是由一组操作构成的可靠的独立的工作单元,事务具备ACID的特性,即原子性、一致性、隔离性和持久性。
-​ 分布式事务:当一个操作牵涉到多个服务、多台数据库协力完成时(比如分表分库后,业务拆分),本地的Transaction已经无法应对这种情况了,为了保证数据一致性,就需要用到分布式事务。
-​ Seata:是一款开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。
-​ 本文目的:现如今微服务越来越流行,而市面上的分布式事务方案可谓不少,参差不齐。比较流行的有以MQ为代表的保证消息最终一致性的方案(消费确认、消息回查、消息补偿机制等),以及TX-LCN以LCN模式协调本地事务来保证统一提交或回滚的方案(已经停止更新,对Dubbo2.7不兼容)。MQ的分布式事务太过复杂,TX-LCN又已断更,这时候就需要一个高效可靠且易上手的分布式事务解决方案,Seata脱颖而出。本文要介绍的就是如何快速搭建一个整合Seata的Demo项目,一起来吧!

-

准备工作

-

1.首先安装mysql、eclipse之类的常用工具,这里就不展开了。

-

2.访问seata下载中心地址,我们使用的是0.9.0版本

-

3.下载并解压seata-server

-

建库建表

-

1.首先我们连接mysql,创建一个名为seata的数据库,然后运行以下建表sql,也就是seata-server的conf文件夹内的db_store.sql(建库与导入的命令示例见这段sql之后):

-
/*
-Navicat MySQL Data Transfer
-Source Server         : mysql
-Source Server Version : 50721
-Source Host           : localhost:3306
-Source Database       : seata
-Target Server Type    : MYSQL
-Target Server Version : 50721
-File Encoding         : 65001
-Date: 2019-11-23 22:03:18
-*/
-
-SET FOREIGN_KEY_CHECKS=0;
-
--- ----------------------------
-
--- Table structure for branch_table
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `branch_table`;
-CREATE TABLE `branch_table` (
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(128) NOT NULL,
-  `transaction_id` bigint(20) DEFAULT NULL,
-  `resource_group_id` varchar(32) DEFAULT NULL,
-  `resource_id` varchar(256) DEFAULT NULL,
-  `lock_key` varchar(128) DEFAULT NULL,
-  `branch_type` varchar(8) DEFAULT NULL,
-  `status` tinyint(4) DEFAULT NULL,
-  `client_id` varchar(64) DEFAULT NULL,
-  `application_data` varchar(2000) DEFAULT NULL,
-  `gmt_create` datetime DEFAULT NULL,
-  `gmt_modified` datetime DEFAULT NULL,
-  PRIMARY KEY (`branch_id`),
-  KEY `idx_xid` (`xid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-
--- ----------------------------
-
--- Records of branch_table
-
--- ----------------------------
-
--- ----------------------------
-
--- Table structure for global_table
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `global_table`;
-CREATE TABLE `global_table` (
-  `xid` varchar(128) NOT NULL,
-  `transaction_id` bigint(20) DEFAULT NULL,
-  `status` tinyint(4) NOT NULL,
-  `application_id` varchar(32) DEFAULT NULL,
-  `transaction_service_group` varchar(32) DEFAULT NULL,
-  `transaction_name` varchar(128) DEFAULT NULL,
-  `timeout` int(11) DEFAULT NULL,
-  `begin_time` bigint(20) DEFAULT NULL,
-  `application_data` varchar(2000) DEFAULT NULL,
-  `gmt_create` datetime DEFAULT NULL,
-  `gmt_modified` datetime DEFAULT NULL,
-  PRIMARY KEY (`xid`),
-  KEY `idx_gmt_modified_status` (`gmt_modified`,`status`),
-  KEY `idx_transaction_id` (`transaction_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-
--- ----------------------------
-
--- Records of global_table
-
--- ----------------------------
-
--- ----------------------------
-
--- Table structure for lock_table
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `lock_table`;
-CREATE TABLE `lock_table` (
-  `row_key` varchar(128) NOT NULL,
-  `xid` varchar(96) DEFAULT NULL,
-  `transaction_id` mediumtext,
-  `branch_id` mediumtext,
-  `resource_id` varchar(256) DEFAULT NULL,
-  `table_name` varchar(32) DEFAULT NULL,
-  `pk` varchar(36) DEFAULT NULL,
-  `gmt_create` datetime DEFAULT NULL,
-  `gmt_modified` datetime DEFAULT NULL,
-  PRIMARY KEY (`row_key`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-
--- ----------------------------
-
--- Records of lock_table
-
--- ----------------------------
-
--- ----------------------------
-
--- Table structure for undo_log
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `undo_log`;
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
--- ----------------------------
-
--- Records of undo_log
-
-
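上面的建表sql也可以不借助图形工具,直接用 mysql 命令行建库后导入,示意如下(假设已把上面的sql保存为 seata.sql,账号密码按实际环境替换):

# 创建 seata 库,字符集与上面的建表语句保持一致
mysql -uroot -p -e "CREATE DATABASE seata DEFAULT CHARACTER SET utf8mb4;"
# 导入保存好的建表脚本(文件名仅为示例)
mysql -uroot -p seata < seata.sql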

2.建好上面seata所需要的数据库后,我们接着搭建demo所需要的库:创建一个名为test的数据库,然后执行以下sql代码:

-
/*
-Navicat MySQL Data Transfer
-Source Server         : mysql
-Source Server Version : 50721
-Source Host           : localhost:3306
-Source Database       : test
-Target Server Type    : MYSQL
-Target Server Version : 50721
-File Encoding         : 65001
-Date: 2019-11-23 22:03:24
-*/
-
-SET FOREIGN_KEY_CHECKS=0;
-
--- ----------------------------
-
--- Table structure for test
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `test`;
-CREATE TABLE `test` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `one` varchar(255) DEFAULT NULL,
-  `two` varchar(255) DEFAULT NULL,
-  `createTime` datetime DEFAULT NULL,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4;
-
--- ----------------------------
-
--- Records of test
-
--- ----------------------------
-
-INSERT INTO `test` VALUES ('1', '1', '2', '2019-11-23 16:07:34');
-
--- ----------------------------
-
--- Table structure for undo_log
-
--- ----------------------------
-
-DROP TABLE IF EXISTS `undo_log`;
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8;
-
--- ----------------------------
-
--- Records of undo_log
-
-

3.我们找到seata-server/conf文件夹内的file.conf并编辑它(如图 20191129132933):

-

4.再找到其中的db配置块,按下图进行修改:

-

好了,可以到bin目录运行seata-server.bat(windows)或seata-server.sh(linux/mac)看看了,启动命令示例见下。
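以 0.9.0 为例,一个最简的启动示意如下(-p 指定监听端口、-h 指定注册给客户端的 ip,参数含义以你下载版本的启动脚本为准,仅作示意):

# linux / mac 下启动 seata-server,windows 直接运行 bin\seata-server.bat 即可
cd 解压后的seata目录/bin
sh seata-server.sh -p 8091 -h 127.0.0.1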

-

创建项目

-

​ 首先我们使用的是eclipse,当然你也可以用idea之类的工具,详细请按下面步骤来运行

-

​ 1.创建一个新的maven项目,并删除多余文件夹(如图 20191129133354、20191129133441):

-

​ 2.打开项目的pom.xml,加入以下依赖:

-
	<properties>
-		<webVersion>3.1</webVersion>
-		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-		<maven.compiler.source>1.8</maven.compiler.source>
-		<maven.compiler.target>1.8</maven.compiler.target>
-		<HikariCP.version>3.2.0</HikariCP.version>
-		<mybatis-plus-boot-starter.version>3.2.0</mybatis-plus-boot-starter.version>
-	</properties>
-	<parent>
-		<groupId>org.springframework.boot</groupId>
-		<artifactId>spring-boot-starter-parent</artifactId>
-		<version>2.1.8.RELEASE</version>
-	</parent>
-	<dependencies>
-		<dependency>
-			<groupId>org.apache.curator</groupId>
-			<artifactId>curator-framework</artifactId>
-			<version>4.2.0</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.curator</groupId>
-			<artifactId>curator-recipes</artifactId>
-			<version>4.2.0</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.dubbo</groupId>
-			<artifactId>dubbo-spring-boot-starter</artifactId>
-			<version>2.7.4.1</version>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.commons</groupId>
-			<artifactId>commons-lang3</artifactId>
-			</dependency>
-		<dependency>
-			<groupId>com.alibaba</groupId>
-			<artifactId>fastjson</artifactId>
-			<version>1.2.60</version>
-		</dependency>
-		<!-- <dependency> <groupId>javax</groupId> <artifactId>javaee-api</artifactId> 
-			<version>7.0</version> <scope>provided</scope> </dependency> -->
-		<dependency>
-			<groupId>io.springfox</groupId>
-			<artifactId>springfox-swagger2</artifactId>
-			<version>2.9.2</version>
-		</dependency>
-		<dependency>
-			<groupId>io.springfox</groupId>
-			<artifactId>springfox-swagger-ui</artifactId>
-			<version>2.9.2</version>
-		</dependency>
- 
-		<!-- mybatis-plus begin -->
-		<dependency>
-			<groupId>com.baomidou</groupId>
-			<artifactId>mybatis-plus-boot-starter</artifactId>
-			<version>${mybatis-plus-boot-starter.version}</version>
-		</dependency>
-		<!-- mybatis-plus end -->
-		<!-- https://mvnrepository.com/artifact/org.projectlombok/lombok -->
-		<dependency>
-			<groupId>org.projectlombok</groupId>
-			<artifactId>lombok</artifactId>
-			<scope>provided</scope>
-		</dependency>
-		<dependency>
-			<groupId>io.seata</groupId>
-			<artifactId>seata-all</artifactId>
-			<version>0.9.0.1</version>
-		</dependency>
-		<!-- Zookeeper -->
-		<dependency>
-			<groupId>org.apache.zookeeper</groupId>
-			<artifactId>zookeeper</artifactId>
-			<version>3.4.9</version>
-			<exclusions>
-				<exclusion>
-					<groupId>org.slf4j</groupId>
-					<artifactId>slf4j-log4j12</artifactId>
-				</exclusion>
-			</exclusions>
-		</dependency>
-		<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>dynamic-datasource-spring-boot-starter</artifactId> 
-			<version>2.5.4</version> </dependency> -->
- 
-		<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-generator</artifactId> 
-			<version>3.1.0</version> </dependency> -->
-		<!-- https://mvnrepository.com/artifact/org.freemarker/freemarker -->
-		<dependency>
-			<groupId>org.freemarker</groupId>
-			<artifactId>freemarker</artifactId>
-		</dependency>
-		<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter -->
-		<dependency>
-			<groupId>com.alibaba</groupId>
-			<artifactId>druid-spring-boot-starter</artifactId>
-			<version>1.1.20</version>
-		</dependency>
-		<!-- 加上这个才能辨认到log4j2.yml文件 -->
-		<dependency>
-			<groupId>com.fasterxml.jackson.dataformat</groupId>
-			<artifactId>jackson-dataformat-yaml</artifactId>
-		</dependency>
-		<dependency> <!-- 引入log4j2依赖 -->
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-log4j2</artifactId>
-		</dependency>
-		<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
-		<dependency>
-			<groupId>mysql</groupId>
-			<artifactId>mysql-connector-java</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-web</artifactId>
-			<exclusions>
-				<exclusion>
-					<groupId>org.springframework.boot</groupId>
-					<artifactId>spring-boot-starter-logging</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.slf4j</groupId>
-					<artifactId>slf4j-log4j12</artifactId>
-				</exclusion>
-			</exclusions>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-aop</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-starter-test</artifactId>
-			<scope>test</scope>
-		</dependency>
-		<!-- <dependency> <groupId>org.scala-lang</groupId> <artifactId>scala-library</artifactId> 
-			<version>2.11.0</version> </dependency> -->
-		<dependency>
-			<groupId>org.springframework.boot</groupId>
-			<artifactId>spring-boot-configuration-processor</artifactId>
-			<optional>true</optional>
-		</dependency>
-	</dependencies>
-
-
-

​ 3.再把父项目的打包方式切换为pom:还是在pom.xml文件里,切换到overview视图,做如图操作(图 20191129134127):

-

4.创建我们的demo子项目test-service(如图 20191129135935):

-

​ 目录如下:

-20191129140048 -
创建EmbeddedZooKeeper.java文件和ProviderApplication.java文件,代码如下:
-
-
package org.test;
- 
-import java.io.File;
-import java.lang.reflect.Method;
-import java.util.Properties;
-import java.util.UUID;
- 
-import org.apache.zookeeper.server.ServerConfig;
-import org.apache.zookeeper.server.ZooKeeperServerMain;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.context.SmartLifecycle;
-import org.springframework.util.ErrorHandler;
-import org.springframework.util.SocketUtils;
- 
-/**
- * from:
- * https://github.com/spring-projects/spring-xd/blob/v1.3.1.RELEASE/spring-xd-dirt/src/main/java/org/springframework/xd/dirt/zookeeper/ZooKeeperUtils.java
- * 
- * Helper class to start an embedded instance of standalone (non clustered) ZooKeeper.
- * 
- * NOTE: at least an external standalone server (if not an ensemble) are recommended, even for
- * {@link org.springframework.xd.dirt.server.singlenode.SingleNodeApplication}
- * 
- * @author Patrick Peralta
- * @author Mark Fisher
- * @author David Turanski
- */
-public class EmbeddedZooKeeper implements SmartLifecycle {
- 
-    /**
-     * Logger.
-     */
-    private static final Logger logger = LoggerFactory.getLogger(EmbeddedZooKeeper.class);
- 
-    /**
-     * ZooKeeper client port. This will be determined dynamically upon startup.
-     */
-    private final int clientPort;
- 
-    /**
-     * Whether to auto-start. Default is true.
-     */
-    private boolean autoStartup = true;
- 
-    /**
-     * Lifecycle phase. Default is 0.
-     */
-    private int phase = 0;
- 
-    /**
-     * Thread for running the ZooKeeper server.
-     */
-    private volatile Thread zkServerThread;
- 
-    /**
-     * ZooKeeper server.
-     */
-    private volatile ZooKeeperServerMain zkServer;
- 
-    /**
-     * {@link ErrorHandler} to be invoked if an Exception is thrown from the ZooKeeper server thread.
-     */
-    private ErrorHandler errorHandler;
- 
-    private boolean daemon = true;
- 
-    /**
-     * Construct an EmbeddedZooKeeper with a random port.
-     */
-    public EmbeddedZooKeeper() {
-        clientPort = SocketUtils.findAvailableTcpPort();
-    }
- 
-    /**
-     * Construct an EmbeddedZooKeeper with the provided port.
-     *
-     * @param clientPort
-     *            port for ZooKeeper server to bind to
-     */
-    public EmbeddedZooKeeper(int clientPort, boolean daemon) {
-        this.clientPort = clientPort;
-        this.daemon = daemon;
-    }
- 
-    /**
-     * Returns the port that clients should use to connect to this embedded server.
-     * 
-     * @return dynamically determined client port
-     */
-    public int getClientPort() {
-        return this.clientPort;
-    }
- 
-    /**
-     * Specify whether to start automatically. Default is true.
-     * 
-     * @param autoStartup
-     *            whether to start automatically
-     */
-    public void setAutoStartup(boolean autoStartup) {
-        this.autoStartup = autoStartup;
-    }
- 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean isAutoStartup() {
-        return this.autoStartup;
-    }
- 
-    /**
-     * Specify the lifecycle phase for the embedded server.
-     * 
-     * @param phase
-     *            the lifecycle phase
-     */
-    public void setPhase(int phase) {
-        this.phase = phase;
-    }
- 
-    /**
-     * {@inheritDoc}
-     */
-    public int getPhase() {
-        return this.phase;
-    }
- 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean isRunning() {
-        return (zkServerThread != null);
-    }
- 
-    /**
-     * Start the ZooKeeper server in a background thread.
-     * <p>
-     * Register an error handler via {@link #setErrorHandler} in order to handle any exceptions thrown during startup or
-     * execution.
-     */
-    public synchronized void start() {
-        if (zkServerThread == null) {
-            zkServerThread = new Thread(new ServerRunnable(), "ZooKeeper Server Starter");
-            zkServerThread.setDaemon(daemon);
-            zkServerThread.start();
-        }
-    }
- 
-    /**
-     * Shutdown the ZooKeeper server.
-     */
-    public synchronized void stop() {
-        if (zkServerThread != null) {
-            // The shutdown method is protected...thus this hack to invoke it.
-            // This will log an exception on shutdown; see
-            // https://issues.apache.org/jira/browse/ZOOKEEPER-1873 for details.
-            try {
-                Method shutdown = ZooKeeperServerMain.class.getDeclaredMethod("shutdown");
-                shutdown.setAccessible(true);
-                shutdown.invoke(zkServer);
-            }
- 
-            catch (Exception e) {
-                throw new RuntimeException(e);
-            }
- 
-            // It is expected that the thread will exit after
-            // the server is shutdown; this will block until
-            // the shutdown is complete.
-            try {
-                zkServerThread.join(5000);
-                zkServerThread = null;
-            } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-                logger.warn("Interrupted while waiting for embedded ZooKeeper to exit");
-                // abandoning zk thread
-                zkServerThread = null;
-            }
-        }
-    }
- 
-    /**
-     * Stop the server if running and invoke the callback when complete.
-     */
-    public void stop(Runnable callback) {
-        stop();
-        callback.run();
-    }
- 
-    /**
-     * Provide an {@link ErrorHandler} to be invoked if an Exception is thrown from the ZooKeeper server thread. If none
-     * is provided, only error-level logging will occur.
-     * 
-     * @param errorHandler
-     *            the {@link ErrorHandler} to be invoked
-     */
-    public void setErrorHandler(ErrorHandler errorHandler) {
-        this.errorHandler = errorHandler;
-    }
- 
-    /**
-     * Runnable implementation that starts the ZooKeeper server.
-     */
-    private class ServerRunnable implements Runnable {
- 
-        public void run() {
-            try {
-                Properties properties = new Properties();
-                File file = new File(System.getProperty("java.io.tmpdir") + File.separator + UUID.randomUUID());
-                file.deleteOnExit();
-                properties.setProperty("dataDir", file.getAbsolutePath());
-                properties.setProperty("clientPort", String.valueOf(clientPort));
- 
-                QuorumPeerConfig quorumPeerConfig = new QuorumPeerConfig();
-                quorumPeerConfig.parseProperties(properties);
- 
-                zkServer = new ZooKeeperServerMain();
-                ServerConfig configuration = new ServerConfig();
-                configuration.readFrom(quorumPeerConfig);
- 
-                zkServer.runFromConfig(configuration);
-            } catch (Exception e) {
-                if (errorHandler != null) {
-                    errorHandler.handleError(e);
-                } else {
-                    logger.error("Exception running embedded ZooKeeper", e);
-                }
-            }
-        }
-    }
- 
-}
-
-
-
package org.test;
- 
-import org.apache.dubbo.config.spring.context.annotation.DubboComponentScan;
-import org.springframework.boot.SpringApplication;
-import org.springframework.boot.autoconfigure.SpringBootApplication;
-import org.springframework.context.annotation.ComponentScan;
-import org.springframework.transaction.annotation.EnableTransactionManagement;
- 
-/**
- * 
- * @author cjb
- * @date 2019/10/24
- */
-@EnableTransactionManagement
-@ComponentScan(basePackages = {"org.test.config", "org.test.service.impl"})
-@DubboComponentScan(basePackages = "org.test.service.impl")
-@SpringBootApplication
-public class ProviderApplication {
- 
-    public static void main(String[] args) {
-        new EmbeddedZooKeeper(2181, false).start();
-        SpringApplication app = new SpringApplication(ProviderApplication.class);
-        app.run(args);
-    }
- 
-}
-
-
-
创建实体包org.test.entity以及实体类Test(这里用到了lombok,详情可自行搜索,eclipse需安装lombok插件):
-
-
package org.test.entity;
- 
-import java.io.Serializable;
-import java.time.LocalDateTime;
- 
-import com.baomidou.mybatisplus.annotation.IdType;
-import com.baomidou.mybatisplus.annotation.TableField;
-import com.baomidou.mybatisplus.annotation.TableId;
- 
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.experimental.Accessors;
- 
-/**
- * <p>
- * 功能
- * </p>
- *
- * @author Funkye
- * @since 2019-04-23
- */
-@Data
-@EqualsAndHashCode(callSuper = false)
-@Accessors(chain = true)
-@ApiModel(value = "test对象", description = "功能")
-public class Test implements Serializable {
- 
-    private static final long serialVersionUID = 1L;
- 
-    @ApiModelProperty(value = "主键")
-    @TableId(value = "id", type = IdType.AUTO)
-    private Integer id;
- 
-    @ApiModelProperty(value = "one")
-    @TableField("one")
-    private String one;
- 
-    @ApiModelProperty(value = "two")
-    @TableField("two")
-    private String two;
- 
-    @ApiModelProperty(value = "createTime")
-    @TableField("createTime")
-    private LocalDateTime createTime;
- 
-}
-
-
-

​ 创建service、service.impl、mapper等包,依次创建ITestService、实现类TestServiceImpl以及TestMapper:

-
package org.test.service;
- 
-import org.test.entity.Test;
- 
-import com.baomidou.mybatisplus.extension.service.IService; 
- 
-/**
- * <p>
- * 功能 服务类
- * </p>
- *
- * @author Funkye
- * @since 2019-04-10
- */
-public interface ITestService extends IService<Test> {
- 
-}
-
-
-
package org.test.service.impl;
- 
- 
- 
- 
-import org.apache.dubbo.config.annotation.Service;
-import org.test.entity.Test;
-import org.test.mapper.TestMapper;
-import org.test.service.ITestService;
- 
-import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
- 
-@Service(version = "1.0.0",interfaceClass =ITestService.class )
-public class TestServiceImpl extends ServiceImpl<TestMapper, Test> implements ITestService {
- 
-}
-
-
-
package org.test.mapper;
- 
-import org.test.entity.Test; 
- 
-import com.baomidou.mybatisplus.core.mapper.BaseMapper;
- 
-/**
- * <p>
- * 功能 Mapper 接口
- * </p>
- *
- * @author Funkye
- * @since 2019-04-10
- */
-public interface TestMapper extends BaseMapper<Test> {
- 
-}
-
-
-
创建org.test.config包,并创建SeataAutoConfig.java,seata相关配置都在此处,主要作用是代理数据源,以及指定连接的事务服务分组:
-
-
package org.test.config;
-
-import javax.sql.DataSource;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.beans.factory.annotation.Qualifier;
-import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Primary;
-
-import com.alibaba.druid.pool.DruidDataSource;
-
-import io.seata.rm.datasource.DataSourceProxy;
-import io.seata.spring.annotation.GlobalTransactionScanner;
-
-@Configuration
-public class SeataAutoConfig {
-	@Autowired(required = true)
-	private DataSourceProperties dataSourceProperties;
-	private final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);
-
-	@Bean(name = "druidDataSource") // 声明其为Bean实例
-	public DataSource druidDataSource() {
-		DruidDataSource druidDataSource = new DruidDataSource();
-		logger.info("dataSourceProperties.getUrl():{}", dataSourceProperties.getUrl());
-		druidDataSource.setUrl(dataSourceProperties.getUrl());
-		druidDataSource.setUsername(dataSourceProperties.getUsername());
-		druidDataSource.setPassword(dataSourceProperties.getPassword());
-		druidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());
-		druidDataSource.setInitialSize(0);
-		druidDataSource.setMaxActive(180);
-		druidDataSource.setMaxWait(60000);
-		druidDataSource.setMinIdle(0);
-		druidDataSource.setValidationQuery("Select 1 from DUAL");
-		druidDataSource.setTestOnBorrow(false);
-		druidDataSource.setTestOnReturn(false);
-		druidDataSource.setTestWhileIdle(true);
-		druidDataSource.setTimeBetweenEvictionRunsMillis(60000);
-		druidDataSource.setMinEvictableIdleTimeMillis(25200000);
-		druidDataSource.setRemoveAbandoned(true);
-		druidDataSource.setRemoveAbandonedTimeout(1800);
-		druidDataSource.setLogAbandoned(true);
-		logger.info("装载dataSource........");
-		return druidDataSource;
-	}
-
-	/**
-	 * init datasource proxy
-	 * 
-	 * @Param: druidDataSource datasource bean instance
-	 * @Return: DataSourceProxy datasource proxy
-	 */
-	@Bean(name = "dataSource")
-	@Primary // 在同样的DataSource中,首先使用被标注的DataSource
-	public DataSourceProxy dataSourceProxy(@Qualifier(value = "druidDataSource") DruidDataSource druidDataSource) {
-		logger.info("代理dataSource........");
-		return new DataSourceProxy(druidDataSource);
-	}
-
-	/**
-	 * init global transaction scanner
-	 *
-	 * @Return: GlobalTransactionScanner
-	 */
-	@Bean
-	public GlobalTransactionScanner globalTransactionScanner() {
-		logger.info("配置seata........");
-		return new GlobalTransactionScanner("test-service", "test-group");
-	}
-}
-
-
再创建mybatisplus所需的配置类MybatisPlusConfig:
-
-
package org.test.config;
- 
-import java.util.ArrayList;
-import java.util.List;
- 
-import org.mybatis.spring.mapper.MapperScannerConfigurer;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
- 
-import com.baomidou.mybatisplus.core.parser.ISqlParser;
-import com.baomidou.mybatisplus.extension.parsers.BlockAttackSqlParser;
-import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
- 
-@Configuration
-// @MapperScan("com.baomidou.springboot.mapper*")//这个注解,作用相当于下面的@Bean
-// MapperScannerConfigurer,2者配置1份即可
-public class MybatisPlusConfig {
- 
-    /**
-     * mybatis-plus分页插件<br>
-     * 文档:http://mp.baomidou.com<br>
-     */
-    @Bean
-    public PaginationInterceptor paginationInterceptor() {
-        PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
-        List<ISqlParser> sqlParserList = new ArrayList<ISqlParser>();
-        // 攻击 SQL 阻断解析器、加入解析链
-        sqlParserList.add(new BlockAttackSqlParser());
-        paginationInterceptor.setSqlParserList(sqlParserList);
-        return paginationInterceptor;
-    }
- 
-    /**
-     * 相当于顶部的: {@code @MapperScan("com.baomidou.springboot.mapper*")} 这里可以扩展,比如使用配置文件来配置扫描Mapper的路径
-     */
- 
-    @Bean
-    public MapperScannerConfigurer mapperScannerConfigurer() {
-        MapperScannerConfigurer scannerConfigurer = new MapperScannerConfigurer();
-        scannerConfigurer.setBasePackage("org.test.mapper");
-        return scannerConfigurer;
-    }
- 
-}
-
-
-

​ 再创建resources目录,创建mapper文件夹,application.yml等文件

-
server:
-  port: 38888
-spring:
-  application: 
-      name: test-service
-  datasource:
-    type: com.alibaba.druid.pool.DruidDataSource
-    url: jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC
-    driver-class-name: com.mysql.cj.jdbc.Driver
-    username: root
-    password: 123456
-dubbo:
-  protocol:
-    loadbalance: leastactive
-    threadpool: cached
-  scan:
-    base-packages: org.test.service
-  application:
-    qos-enable: false
-    name: testserver
-  registry:
-    id: my-registry
-    address:  zookeeper://127.0.0.1:2181?client=curator
-mybatis-plus:
-  mapper-locations: classpath:/mapper/*Mapper.xml
-  typeAliasesPackage: org.test.entity
-  global-config:
-    db-config:
-      field-strategy: not-empty
-      id-type: auto
-      db-type: mysql
-  configuration:
-    map-underscore-to-camel-case: true
-    cache-enabled: true      
-    auto-mapping-unknown-column-behavior: none
-
-
-

​ 创建file.conf,此处service内的vgroup_mapping.后面跟的就是你的事务分组名,要与SeataAutoConfig内配置的分组保持一致:比如上面配置了test-group,那么这里也要写test-group;下面grouplist里填seata-server运行的ip和端口就行了。

-
transport {
-  type = "TCP"
-  server = "NIO"
-  heartbeat = true
-  thread-factory {
-    boss-thread-prefix = "NettyBoss"
-    worker-thread-prefix = "NettyServerNIOWorker"
-    server-executor-thread-prefix = "NettyServerBizHandler"
-    share-boss-worker = false
-    client-selector-thread-prefix = "NettyClientSelector"
-    client-selector-thread-size = 1
-    client-worker-thread-prefix = "NettyClientWorkerThread"
-    boss-thread-size = 1
-    worker-thread-size = 8
-  }
-  shutdown {
-    wait = 3
-  }
-  serialization = "seata"
-  compressor = "none"
-}
-service {
-  vgroup_mapping.test-group = "default"
-  default.grouplist = "127.0.0.1:8091"
-  enableDegrade = false
-  disable = false
-  max.commit.retry.timeout = "-1"
-  max.rollback.retry.timeout = "-1"
-}
- 
-client {
-  async.commit.buffer.limit = 10000
-  lock {
-    retry.internal = 10
-    retry.times = 30
-  }
-  report.retry.count = 5
-  tm.commit.retry.count = 1
-  tm.rollback.retry.count = 1
-  undo.log.table = "undo_log"
-}
- 
-recovery {
-  committing-retry-period = 1000
-  asyn-committing-retry-period = 1000
-  rollbacking-retry-period = 1000
-  timeout-retry-period = 1000
-}
- 
-transaction {
-  undo.data.validation = true
-  undo.log.serialization = "jackson"
-  undo.log.save.days = 7
-  undo.log.delete.period = 86400000
-  undo.log.table = "undo_log"
-}
- 
-metrics {
-  enabled = false
-  registry-type = "compact"
-  exporter-list = "prometheus"
-  exporter-prometheus-port = 9898
-}
- 
-support {
-  spring {
-    datasource.autoproxy = false
-  }
-}
-
-
-

创建registry.conf,用来指定file、zk等注册中心/配置中心的地址端口之类的配置:

-
registry {
-  type = "file"
-  file {
-    name = "file.conf"
-  }
-}
-config {
-  type = "file"
-  file {
-    name = "file.conf"
-  }
-  zk {
-    serverAddr = "127.0.0.1:2181"
-    session.timeout = 6000
-    connect.timeout = 2000
-  }
-}
-
-
-

​ 大功告成,可以直接运行啦,这时候观察seata-server(如图 20191129142115)。

-

​ 接下来我们创建test-client项目,这里就不赘述了,跟上面的test-service一样的创建方式。

-

​ 接下来我们把test-service内的service跟实体复制过去。当然如果你嫌麻烦,可以单独搞个子项目存放通用的service、实体以及一些工具类等;我这边为了快速搭建这个demo,就选择复制粘贴的方式了。

-

目录结构:

-
然后我们创建ClientApplication:
-
-
package org.test;
- 
-import java.util.TimeZone;
-import java.util.concurrent.Executor;
- 
-import org.apache.dubbo.config.spring.context.annotation.EnableDubbo;
-import org.springframework.boot.SpringApplication;
-import org.springframework.boot.autoconfigure.SpringBootApplication;
-import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.ComponentScan;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.scheduling.annotation.EnableAsync;
-import org.springframework.scheduling.annotation.EnableScheduling;
-import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
- 
-import com.baomidou.mybatisplus.autoconfigure.MybatisPlusAutoConfiguration;
- 
-@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class, MybatisPlusAutoConfiguration.class})
-@EnableScheduling
-@EnableAsync
-@Configuration
-@EnableDubbo(scanBasePackages = {"org.test.service"})
-@ComponentScan(basePackages = {"org.test.service", "org.test.controller", "org.test.config"})
-public class ClientApplication {
-    public static void main(String[] args) {
-        TimeZone.setDefault(TimeZone.getTimeZone("Asia/Shanghai"));
-        SpringApplication app = new SpringApplication(ClientApplication.class);
-        app.run(args);
-    }
- 
-    @Bean(name = "threadPoolTaskExecutor")
-    public Executor threadPoolTaskExecutor() {
-        return new ThreadPoolTaskExecutor();
-    }
-}
-
-
-

再到config包内创建SwaggerConfig:

-
package org.test.config;
- 
-import java.util.ArrayList;
-import java.util.List;
- 
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
- 
-import springfox.documentation.builders.ApiInfoBuilder;
-import springfox.documentation.builders.PathSelectors;
-import springfox.documentation.builders.RequestHandlerSelectors;
-import springfox.documentation.service.ApiInfo;
-import springfox.documentation.service.Contact;
-import springfox.documentation.service.Parameter;
-import springfox.documentation.spi.DocumentationType;
-import springfox.documentation.spring.web.plugins.Docket;
-import springfox.documentation.swagger2.annotations.EnableSwagger2;
- 
-@Configuration
-@EnableSwagger2
-public class SwaggerConfig {
-    // swagger2的配置文件,这里可以配置swagger2的一些基本的内容,比如扫描的包等等
-    @Bean
-    public Docket createRestApi() {
-        List<Parameter> pars = new ArrayList<Parameter>();
-        return new Docket(DocumentationType.SWAGGER_2).apiInfo(apiInfo()).select()
-            // 为当前包路径
-            .apis(RequestHandlerSelectors.basePackage("org.test.controller")).paths(PathSelectors.any()).build()
-            .globalOperationParameters(pars);
-    }
- 
-    // 构建 api文档的详细信息函数,注意这里的注解引用的是哪个
-    private ApiInfo apiInfo() {
-        return new ApiInfoBuilder()
-            // 页面标题
-            .title("项目接口")
-            // 创建人
-            .contact(new Contact("FUNKYE", "", ""))
-            // 版本号
-            .version("1.0")
-            // 描述
-            .description("API 描述").build();
-    }
-}
-
-
-

​ 再创建SpringMvcConfigure,在里面放入seata的配置。我为了偷懒直接集成在mvc配置类里了,大家规范一点可以另外创建一个专门配置seata的类。可以发现下面还是有个事务组名称,我把两个项目都分配到同一个组,貌似另外取一个名字也没问题。

-
package org.test.config;
- 
-import java.nio.charset.Charset; 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
- 
-import org.apache.dubbo.config.annotation.Reference;
-import org.springframework.boot.web.servlet.FilterRegistrationBean;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.core.Ordered;
-import org.springframework.http.MediaType;
-import org.springframework.http.converter.HttpMessageConverter;
-import org.springframework.http.converter.StringHttpMessageConverter;
-import org.springframework.web.cors.CorsConfiguration;
-import org.springframework.web.cors.UrlBasedCorsConfigurationSource;
-import org.springframework.web.filter.CorsFilter;
-import org.springframework.web.servlet.HandlerInterceptor;
-import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
-import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
-import org.springframework.web.servlet.view.InternalResourceViewResolver;
- 
-import com.alibaba.fastjson.serializer.SerializerFeature;
-import com.alibaba.fastjson.support.config.FastJsonConfig;
-import com.alibaba.fastjson.support.spring.FastJsonHttpMessageConverter;
-import com.google.common.collect.Maps;
- 
-import io.seata.spring.annotation.GlobalTransactionScanner;
- 
-@Configuration
-public class SpringMvcConfigure implements WebMvcConfigurer {
- 
-    @Bean
-    public FilterRegistrationBean corsFilter() {
-        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
-        CorsConfiguration config = new CorsConfiguration();
-        config.setAllowCredentials(true);
-        config.addAllowedOrigin("*");
-        config.addAllowedHeader(CorsConfiguration.ALL);
-        config.addAllowedMethod(CorsConfiguration.ALL);
-        source.registerCorsConfiguration("/**", config);
-        FilterRegistrationBean filterRegistrationBean = new FilterRegistrationBean(new CorsFilter(source));
-        filterRegistrationBean.setOrder(Ordered.HIGHEST_PRECEDENCE);
-        filterRegistrationBean.setOrder(1);
-        filterRegistrationBean.setEnabled(true);
-        filterRegistrationBean.addUrlPatterns("/**");
-        Map<String, String> initParameters = Maps.newHashMap();
-        initParameters.put("excludes", "/favicon.ico,/img/*,/js/*,/css/*");
-        initParameters.put("isIncludeRichText", "true");
-        filterRegistrationBean.setInitParameters(initParameters);
-        return filterRegistrationBean;
-    }
- 
-    @Bean
-    public InternalResourceViewResolver viewResolver() {
-        InternalResourceViewResolver viewResolver = new InternalResourceViewResolver();
-        viewResolver.setPrefix("/WEB-INF/jsp/");
-        viewResolver.setSuffix(".jsp");
-        // viewResolver.setViewClass(JstlView.class);
-        // 这个属性通常并不需要手动配置,高版本的Spring会自动检测
-        return viewResolver;
-    }
- 
- 
- 
-    /**
-     * 替换框架json为fastjson
-     */
-    @Override
-    public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
-        FastJsonHttpMessageConverter fastConverter = new FastJsonHttpMessageConverter();
-        FastJsonConfig fastJsonConfig = new FastJsonConfig();
-        fastJsonConfig.setSerializerFeatures(SerializerFeature.PrettyFormat, SerializerFeature.WriteMapNullValue,
-            SerializerFeature.WriteNullStringAsEmpty, SerializerFeature.DisableCircularReferenceDetect);
-        // 处理中文乱码问题
-        List<MediaType> fastMediaTypes = new ArrayList<>();
-        fastMediaTypes.add(MediaType.APPLICATION_JSON_UTF8);
-        fastConverter.setSupportedMediaTypes(fastMediaTypes);
-        fastConverter.setFastJsonConfig(fastJsonConfig);
-        // 处理字符串, 避免直接返回字符串的时候被添加了引号
-        StringHttpMessageConverter smc = new StringHttpMessageConverter(Charset.forName("UTF-8"));
-        converters.add(smc);
-        converters.add(fastConverter);
-    }
- 
-    @Bean
-    public GlobalTransactionScanner globalTransactionScanner() {
-        return new GlobalTransactionScanner("test-client", "test-group");
-    }
- 
-}
-
-
-

Next create the controller package and, inside it, create TestController:

-
package org.test.controller;
- 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.context.annotation.Lazy;
-import org.springframework.web.bind.annotation.GetMapping;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-import org.test.service.DemoService;
- 
-import io.swagger.annotations.Api;
-import io.swagger.annotations.ApiOperation;
- 
-/**
- * <p>
- * 文件表 前端控制器
- * </p>
- *
- * @author funkye
- * @since 2019-03-20
- */
-@RestController
-@RequestMapping("/test")
-@Api(tags = "测试接口")
-public class TestController {
- 
-    private final static Logger logger = LoggerFactory.getLogger(TestController.class);
-    @Autowired
-    @Lazy
-    DemoService demoService;
- 
-    @GetMapping(value = "testSeataOne")
-    @ApiOperation(value = "测试手动回滚分布式事务接口")
-    public Object testSeataOne() {
-        return demoService.One();
-    }
- 
-    @GetMapping(value = "testSeataTwo")
-    @ApiOperation(value = "测试异常回滚分布式事务接口")
-    public Object testSeataTwo() {
-        return demoService.Two();
-    }
- 
-}
-
-
-

Then go to the service package and create the DemoService it depends on:

-
package org.test.service;
- 
-import java.time.LocalDateTime;
- 
-import org.apache.dubbo.config.annotation.Reference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.stereotype.Service;
-import org.test.controller.TestController;
-import org.test.entity.Test;
- 
-import io.seata.core.context.RootContext;
-import io.seata.core.exception.TransactionException;
-import io.seata.spring.annotation.GlobalTransactional;
-import io.seata.tm.api.GlobalTransactionContext;
- 
-@Service
-public class DemoService {
-	@Reference(version = "1.0.0", timeout = 60000)
-	private ITestService testService;
-	private final static Logger logger = LoggerFactory.getLogger(DemoService.class);
- 
-	/**
-	 * 手动回滚示例
-	 * 
-	 * @return
-	 */
-	@GlobalTransactional
-	public Object One() {
-		logger.info("seata分布式事务Id:{}", RootContext.getXID());
-		Test t = new Test();
-		t.setOne("1");
-		t.setTwo("2");
-		t.setCreateTime(LocalDateTime.now());
-		testService.save(t);
-		try {
-			int i = 1 / 0;
-			return true;
-		} catch (Exception e) {
-			// TODO: handle exception
-			try {
-				logger.info("载入事务id进行回滚");
-				GlobalTransactionContext.reload(RootContext.getXID()).rollback();
-			} catch (TransactionException e1) {
-				// TODO Auto-generated catch block
-				e1.printStackTrace();
-			}
-		}
-		return false;
-	}
- 
-	/**
-	 * 抛出异常进行回滚示例
-	 * 
-	 * @return
-	 */
-	@GlobalTransactional
-	public Object Two() {
-		logger.info("seata分布式事务Id:{}", RootContext.getXID());
-		Test t = new Test();
-		t.setOne("1");
-		t.setTwo("2");
-		t.setCreateTime(LocalDateTime.now());
-		testService.save(t);
-		try {
-			int i = 1 / 0;
-			return true;
-		} catch (Exception e) {
-			// TODO: handle exception
-			throw new RuntimeException();
-		}
-	}
-}
-
-
-

Create the resources folder in the same way, starting with the usual application.yml:

-
spring:
-  application:
-     name: test
-  datasource:
-     driver-class-name: com.mysql.cj.jdbc.Driver
-     url: jdbc:mysql://127.0.0.1:3306/test?useSSL=true&useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai
-     username: root
-     password: 123456
-  mvc:
-    servlet:
-      load-on-startup: 1
-  http:
-    encoding:
-            force: true
-            charset: utf-8
-            enabled: true
-    multipart:
-      max-file-size: 10MB
-      max-request-size: 10MB
-dubbo:
-  registry:
-    id: my-registry
-    address:  zookeeper://127.0.0.1:2181?client=curator
-#    address:  zookeeper://127.0.0.1:2181?client=curator
-  application:
-    name: dubbo-demo-client
-    qos-enable: false
-server:
-  port: 28888
-  max-http-header-size: 8192
-  address: 0.0.0.0
-  tomcat:
-    max-http-post-size: 104857600
-
-
-

Then copy over the file and registry configuration files you already set up for the service module. If you changed the client's group name in the configuration class, the group name inside the file configuration must be changed to match.
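For reference, this is roughly the relevant fragment of file.conf from the service module shown earlier; only the vgroup_mapping key has to line up with the group name passed to GlobalTransactionScanner (test-group in this demo), while the grouplist points at the running seata-server:

service {
  vgroup_mapping.test-group = "default"
  default.grouplist = "127.0.0.1:8091"
}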

-

-

The complete directory structure is shown above. Now you can start test-service, then start test-client, and test it from Swagger.

-

4. Visit 127.0.0.1:28888/swagger-ui.html for the final wrap-up.

-

(screenshot: 20191129143124)

-

I have already stored one record here; let's see whether the rollback succeeds:

-

(screenshot: 20191129143252)

-

Refresh the database: there is still only one row of data:

-

(screenshot: 20191129143124)

-

Then check the logs:

-

(screenshot: 20191129143407)

-

They show the transaction has been rolled back. Now let's look at the seata-server log:

- -

It shows the rollback succeeded and the transaction id matches, so our distributed transaction now runs end to end. If you step through with breakpoints and inspect undo_log, you will see that a row of transaction information is inserted before the transaction commits; once the rollback succeeds, that row is deleted.

-

Summary

-

Integrating Seata is fairly simple and easy to pick up; with a little more care you will certainly do it better than I did!

-

I also encourage everyone to read the source code of Seata, Dubbo and similar projects; it can save you from a lot of the pitfalls you hit in real business work!

-
- - - - - - - diff --git a/zh-cn/blog/springboot-dubbo-mybatisplus-seata.json b/zh-cn/blog/springboot-dubbo-mybatisplus-seata.json deleted file mode 100644 index d75482d0..00000000 --- a/zh-cn/blog/springboot-dubbo-mybatisplus-seata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "filename": "springboot-dubbo-mybatisplus-seata.md", - "__html": "

SpringBoot+Dubbo+MybatisPlus整合Seata分布式事务

\n

项目地址

\n

本文作者:FUNKYE(陈健斌),杭州某互联网公司主程。

\n

前言

\n

事务:事务是由一组操作构成的可靠的独立的工作单元,事务具备ACID的特性,即原子性、一致性、隔离性和持久性。\n​ 分布式事务:当一个操作牵涉到多个服务,多台数据库协力完成时(比如分表分库后,业务拆分),多个服务中,本地的Transaction已经无法应对这个情况了,为了保证数据一致性,就需要用到分布式事务。\n​ Seata :是一款开源的分布式事务解决方案,致力于在微服务架构下提供高性能和简单易用的分布式事务服务。\n​ 本文目的:现如今微服务越来越流行,而市面上的分布式事务的方案可谓不少,参差不齐,比较流行的以MQ代表的保证的是消息最终一致性的解决方案(消费确认,消息回查,消息补偿机制等),以及TX-LCN的LCN模式协调本地事务来保证事务统一提交或回滚(已经停止更新,对Dubbo2.7不兼容)。而MQ的分布式事务太过复杂,TX-LCN断更,这时候需要一个高效可靠及易上手的分布式事务解决方案,Seata脱颖而出,本文要介绍的就是如何快速搭建一个整合Seata的Demo项目,一起来吧!

\n

准备工作

\n

1.首先安装mysql,eclipse之类常用的工具,这不展开了.

\n

2.访问seata下载中心地址我们使用的0.9.0版本

\n

3.下载并解压seata-server

\n

建库建表

\n

1.首先我们链接mysql创建一个名为seata的数据库,然后运行一下建表sql,这个在seata-server的conf文件夹内的db_store.sql就是我的所需要使用的sql了.

\n
/*\nNavicat MySQL Data Transfer\nSource Server         : mysql\nSource Server Version : 50721\nSource Host           : localhost:3306\nSource Database       : seata\nTarget Server Type    : MYSQL\nTarget Server Version : 50721\nFile Encoding         : 65001\nDate: 2019-11-23 22:03:18\n*/\n\nSET FOREIGN_KEY_CHECKS=0;\n\n-- ----------------------------\n\n-- Table structure for branch_table\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `branch_table`;\nCREATE TABLE `branch_table` (\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(128) NOT NULL,\n  `transaction_id` bigint(20) DEFAULT NULL,\n  `resource_group_id` varchar(32) DEFAULT NULL,\n  `resource_id` varchar(256) DEFAULT NULL,\n  `lock_key` varchar(128) DEFAULT NULL,\n  `branch_type` varchar(8) DEFAULT NULL,\n  `status` tinyint(4) DEFAULT NULL,\n  `client_id` varchar(64) DEFAULT NULL,\n  `application_data` varchar(2000) DEFAULT NULL,\n  `gmt_create` datetime DEFAULT NULL,\n  `gmt_modified` datetime DEFAULT NULL,\n  PRIMARY KEY (`branch_id`),\n  KEY `idx_xid` (`xid`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;\n\n-- ----------------------------\n\n-- Records of branch_table\n\n-- ----------------------------\n\n-- ----------------------------\n\n-- Table structure for global_table\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `global_table`;\nCREATE TABLE `global_table` (\n  `xid` varchar(128) NOT NULL,\n  `transaction_id` bigint(20) DEFAULT NULL,\n  `status` tinyint(4) NOT NULL,\n  `application_id` varchar(32) DEFAULT NULL,\n  `transaction_service_group` varchar(32) DEFAULT NULL,\n  `transaction_name` varchar(128) DEFAULT NULL,\n  `timeout` int(11) DEFAULT NULL,\n  `begin_time` bigint(20) DEFAULT NULL,\n  `application_data` varchar(2000) DEFAULT NULL,\n  `gmt_create` datetime DEFAULT NULL,\n  `gmt_modified` datetime DEFAULT NULL,\n  PRIMARY KEY (`xid`),\n  KEY `idx_gmt_modified_status` (`gmt_modified`,`status`),\n  KEY `idx_transaction_id` (`transaction_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;\n\n-- ----------------------------\n\n-- Records of global_table\n\n-- ----------------------------\n\n-- ----------------------------\n\n-- Table structure for lock_table\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `lock_table`;\nCREATE TABLE `lock_table` (\n  `row_key` varchar(128) NOT NULL,\n  `xid` varchar(96) DEFAULT NULL,\n  `transaction_id` mediumtext,\n  `branch_id` mediumtext,\n  `resource_id` varchar(256) DEFAULT NULL,\n  `table_name` varchar(32) DEFAULT NULL,\n  `pk` varchar(36) DEFAULT NULL,\n  `gmt_create` datetime DEFAULT NULL,\n  `gmt_modified` datetime DEFAULT NULL,\n  PRIMARY KEY (`row_key`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;\n\n-- ----------------------------\n\n-- Records of lock_table\n\n-- ----------------------------\n\n-- ----------------------------\n\n-- Table structure for undo_log\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `undo_log`;\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n\n-- Records of undo_log\n
\n

2.运行完上面的seata所需要的数据库后,我们进行搭建我们所需要写的demo的库,创建一个名为test的数据库,然后执行以下sql代码:

\n
/*\nNavicat MySQL Data Transfer\nSource Server         : mysql\nSource Server Version : 50721\nSource Host           : localhost:3306\nSource Database       : test\nTarget Server Type    : MYSQL\nTarget Server Version : 50721\nFile Encoding         : 65001\nDate: 2019-11-23 22:03:24\n*/\n\nSET FOREIGN_KEY_CHECKS=0;\n\n-- ----------------------------\n\n-- Table structure for test\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `test`;\nCREATE TABLE `test` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `one` varchar(255) DEFAULT NULL,\n  `two` varchar(255) DEFAULT NULL,\n  `createTime` datetime DEFAULT NULL,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4;\n\n-- ----------------------------\n\n-- Records of test\n\n-- ----------------------------\n\nINSERT INTO `test` VALUES ('1', '1', '2', '2019-11-23 16:07:34');\n\n-- ----------------------------\n\n-- Table structure for undo_log\n\n-- ----------------------------\n\nDROP TABLE IF EXISTS `undo_log`;\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n\n-- Records of undo_log\n
\n

3.我们找到seata-server/conf 文件夹内的file编辑它:\"20191129132933\"

\n

4.再次找到其中的db配置方法块,更改方法如下图:\"\"

\n

好了,可以到bin目录去./seata-server.bat 运行看看了

\n

创建项目

\n

​\t首先我们使用的是eclipse,当然你也可以用idea之类的工具,详细请按下面步骤来运行

\n

​\t1.创建一个新的maven项目,并删除多余文件夹:\"20191129133354\"\"20191129133441\"

\n

​\t2.打开项目的pom.xml,加入以下依赖:

\n
\t<properties>\n\t\t<webVersion>3.1</webVersion>\n\t\t<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n\t\t<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>\n\t\t<maven.compiler.source>1.8</maven.compiler.source>\n\t\t<maven.compiler.target>1.8</maven.compiler.target>\n\t\t<HikariCP.version>3.2.0</HikariCP.version>\n\t\t<mybatis-plus-boot-starter.version>3.2.0</mybatis-plus-boot-starter.version>\n\t</properties>\n\t<parent>\n\t\t<groupId>org.springframework.boot</groupId>\n\t\t<artifactId>spring-boot-starter-parent</artifactId>\n\t\t<version>2.1.8.RELEASE</version>\n\t</parent>\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.curator</groupId>\n\t\t\t<artifactId>curator-framework</artifactId>\n\t\t\t<version>4.2.0</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.curator</groupId>\n\t\t\t<artifactId>curator-recipes</artifactId>\n\t\t\t<version>4.2.0</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.dubbo</groupId>\n\t\t\t<artifactId>dubbo-spring-boot-starter</artifactId>\n\t\t\t<version>2.7.4.1</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.apache.commons</groupId>\n\t\t\t<artifactId>commons-lang3</artifactId>\n\t\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>com.alibaba</groupId>\n\t\t\t<artifactId>fastjson</artifactId>\n\t\t\t<version>1.2.60</version>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>javax</groupId> <artifactId>javaee-api</artifactId> \n\t\t\t<version>7.0</version> <scope>provided</scope> </dependency> -->\n\t\t<dependency>\n\t\t\t<groupId>io.springfox</groupId>\n\t\t\t<artifactId>springfox-swagger2</artifactId>\n\t\t\t<version>2.9.2</version>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>io.springfox</groupId>\n\t\t\t<artifactId>springfox-swagger-ui</artifactId>\n\t\t\t<version>2.9.2</version>\n\t\t</dependency>\n \n\t\t<!-- mybatis-plus begin -->\n\t\t<dependency>\n\t\t\t<groupId>com.baomidou</groupId>\n\t\t\t<artifactId>mybatis-plus-boot-starter</artifactId>\n\t\t\t<version>${mybatis-plus-boot-starter.version}</version>\n\t\t</dependency>\n\t\t<!-- mybatis-plus end -->\n\t\t<!-- https://mvnrepository.com/artifact/org.projectlombok/lombok -->\n\t\t<dependency>\n\t\t\t<groupId>org.projectlombok</groupId>\n\t\t\t<artifactId>lombok</artifactId>\n\t\t\t<scope>provided</scope>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>io.seata</groupId>\n\t\t\t<artifactId>seata-all</artifactId>\n\t\t\t<version>0.9.0.1</version>\n\t\t</dependency>\n\t\t<!-- Zookeeper -->\n\t\t<dependency>\n\t\t\t<groupId>org.apache.zookeeper</groupId>\n\t\t\t<artifactId>zookeeper</artifactId>\n\t\t\t<version>3.4.9</version>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.slf4j</groupId>\n\t\t\t\t\t<artifactId>slf4j-log4j12</artifactId>\n\t\t\t\t</exclusion>\n\t\t\t</exclusions>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>dynamic-datasource-spring-boot-starter</artifactId> \n\t\t\t<version>2.5.4</version> </dependency> -->\n \n\t\t<!-- <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-generator</artifactId> \n\t\t\t<version>3.1.0</version> </dependency> -->\n\t\t<!-- https://mvnrepository.com/artifact/org.freemarker/freemarker -->\n\t\t<dependency>\n\t\t\t<groupId>org.freemarker</groupId>\n\t\t\t<artifactId>freemarker</artifactId>\n\t\t</dependency>\n\t\t<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter 
-->\n\t\t<dependency>\n\t\t\t<groupId>com.alibaba</groupId>\n\t\t\t<artifactId>druid-spring-boot-starter</artifactId>\n\t\t\t<version>1.1.20</version>\n\t\t</dependency>\n\t\t<!-- 加上这个才能辨认到log4j2.yml文件 -->\n\t\t<dependency>\n\t\t\t<groupId>com.fasterxml.jackson.dataformat</groupId>\n\t\t\t<artifactId>jackson-dataformat-yaml</artifactId>\n\t\t</dependency>\n\t\t<dependency> <!-- 引入log4j2依赖 -->\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-log4j2</artifactId>\n\t\t</dependency>\n\t\t<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->\n\t\t<dependency>\n\t\t\t<groupId>mysql</groupId>\n\t\t\t<artifactId>mysql-connector-java</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-web</artifactId>\n\t\t\t<exclusions>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t\t\t<artifactId>spring-boot-starter-logging</artifactId>\n\t\t\t\t</exclusion>\n\t\t\t\t<exclusion>\n\t\t\t\t\t<groupId>org.slf4j</groupId>\n\t\t\t\t\t<artifactId>slf4j-log4j12</artifactId>\n\t\t\t\t</exclusion>\n\t\t\t</exclusions>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-aop</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-test</artifactId>\n\t\t\t<scope>test</scope>\n\t\t</dependency>\n\t\t<!-- <dependency> <groupId>org.scala-lang</groupId> <artifactId>scala-library</artifactId> \n\t\t\t<version>2.11.0</version> </dependency> -->\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-configuration-processor</artifactId>\n\t\t\t<optional>true</optional>\n\t\t</dependency>\n\t</dependencies>\n\n
\n

​\t3.再切换父项目为pom模式,还是pom文件,切换为 overview ,做如图操作:\"20191129134127\"

\n

4.创建我们的demo子项目,test-service:\"20191129135935\"

\n

​\t目录如下:

\n\"20191129140048\"\n
创建EmbeddedZooKeeper.java文件,跟 ProviderApplication.java,代码如下:\n
\n
package org.test;\n \nimport java.io.File;\nimport java.lang.reflect.Method;\nimport java.util.Properties;\nimport java.util.UUID;\n \nimport org.apache.zookeeper.server.ServerConfig;\nimport org.apache.zookeeper.server.ZooKeeperServerMain;\nimport org.apache.zookeeper.server.quorum.QuorumPeerConfig;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.context.SmartLifecycle;\nimport org.springframework.util.ErrorHandler;\nimport org.springframework.util.SocketUtils;\n \n/**\n * from:\n * https://github.com/spring-projects/spring-xd/blob/v1.3.1.RELEASE/spring-xd-dirt/src/main/java/org/springframework/xd/dirt/zookeeper/ZooKeeperUtils.java\n * \n * Helper class to start an embedded instance of standalone (non clustered) ZooKeeper.\n * \n * NOTE: at least an external standalone server (if not an ensemble) are recommended, even for\n * {@link org.springframework.xd.dirt.server.singlenode.SingleNodeApplication}\n * \n * @author Patrick Peralta\n * @author Mark Fisher\n * @author David Turanski\n */\npublic class EmbeddedZooKeeper implements SmartLifecycle {\n \n    /**\n     * Logger.\n     */\n    private static final Logger logger = LoggerFactory.getLogger(EmbeddedZooKeeper.class);\n \n    /**\n     * ZooKeeper client port. This will be determined dynamically upon startup.\n     */\n    private final int clientPort;\n \n    /**\n     * Whether to auto-start. Default is true.\n     */\n    private boolean autoStartup = true;\n \n    /**\n     * Lifecycle phase. Default is 0.\n     */\n    private int phase = 0;\n \n    /**\n     * Thread for running the ZooKeeper server.\n     */\n    private volatile Thread zkServerThread;\n \n    /**\n     * ZooKeeper server.\n     */\n    private volatile ZooKeeperServerMain zkServer;\n \n    /**\n     * {@link ErrorHandler} to be invoked if an Exception is thrown from the ZooKeeper server thread.\n     */\n    private ErrorHandler errorHandler;\n \n    private boolean daemon = true;\n \n    /**\n     * Construct an EmbeddedZooKeeper with a random port.\n     */\n    public EmbeddedZooKeeper() {\n        clientPort = SocketUtils.findAvailableTcpPort();\n    }\n \n    /**\n     * Construct an EmbeddedZooKeeper with the provided port.\n     *\n     * @param clientPort\n     *            port for ZooKeeper server to bind to\n     */\n    public EmbeddedZooKeeper(int clientPort, boolean daemon) {\n        this.clientPort = clientPort;\n        this.daemon = daemon;\n    }\n \n    /**\n     * Returns the port that clients should use to connect to this embedded server.\n     * \n     * @return dynamically determined client port\n     */\n    public int getClientPort() {\n        return this.clientPort;\n    }\n \n    /**\n     * Specify whether to start automatically. 
Default is true.\n     * \n     * @param autoStartup\n     *            whether to start automatically\n     */\n    public void setAutoStartup(boolean autoStartup) {\n        this.autoStartup = autoStartup;\n    }\n \n    /**\n     * {@inheritDoc}\n     */\n    public boolean isAutoStartup() {\n        return this.autoStartup;\n    }\n \n    /**\n     * Specify the lifecycle phase for the embedded server.\n     * \n     * @param phase\n     *            the lifecycle phase\n     */\n    public void setPhase(int phase) {\n        this.phase = phase;\n    }\n \n    /**\n     * {@inheritDoc}\n     */\n    public int getPhase() {\n        return this.phase;\n    }\n \n    /**\n     * {@inheritDoc}\n     */\n    public boolean isRunning() {\n        return (zkServerThread != null);\n    }\n \n    /**\n     * Start the ZooKeeper server in a background thread.\n     * <p>\n     * Register an error handler via {@link #setErrorHandler} in order to handle any exceptions thrown during startup or\n     * execution.\n     */\n    public synchronized void start() {\n        if (zkServerThread == null) {\n            zkServerThread = new Thread(new ServerRunnable(), \"ZooKeeper Server Starter\");\n            zkServerThread.setDaemon(daemon);\n            zkServerThread.start();\n        }\n    }\n \n    /**\n     * Shutdown the ZooKeeper server.\n     */\n    public synchronized void stop() {\n        if (zkServerThread != null) {\n            // The shutdown method is protected...thus this hack to invoke it.\n            // This will log an exception on shutdown; see\n            // https://issues.apache.org/jira/browse/ZOOKEEPER-1873 for details.\n            try {\n                Method shutdown = ZooKeeperServerMain.class.getDeclaredMethod(\"shutdown\");\n                shutdown.setAccessible(true);\n                shutdown.invoke(zkServer);\n            }\n \n            catch (Exception e) {\n                throw new RuntimeException(e);\n            }\n \n            // It is expected that the thread will exit after\n            // the server is shutdown; this will block until\n            // the shutdown is complete.\n            try {\n                zkServerThread.join(5000);\n                zkServerThread = null;\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n                logger.warn(\"Interrupted while waiting for embedded ZooKeeper to exit\");\n                // abandoning zk thread\n                zkServerThread = null;\n            }\n        }\n    }\n \n    /**\n     * Stop the server if running and invoke the callback when complete.\n     */\n    public void stop(Runnable callback) {\n        stop();\n        callback.run();\n    }\n \n    /**\n     * Provide an {@link ErrorHandler} to be invoked if an Exception is thrown from the ZooKeeper server thread. 
If none\n     * is provided, only error-level logging will occur.\n     * \n     * @param errorHandler\n     *            the {@link ErrorHandler} to be invoked\n     */\n    public void setErrorHandler(ErrorHandler errorHandler) {\n        this.errorHandler = errorHandler;\n    }\n \n    /**\n     * Runnable implementation that starts the ZooKeeper server.\n     */\n    private class ServerRunnable implements Runnable {\n \n        public void run() {\n            try {\n                Properties properties = new Properties();\n                File file = new File(System.getProperty(\"java.io.tmpdir\") + File.separator + UUID.randomUUID());\n                file.deleteOnExit();\n                properties.setProperty(\"dataDir\", file.getAbsolutePath());\n                properties.setProperty(\"clientPort\", String.valueOf(clientPort));\n \n                QuorumPeerConfig quorumPeerConfig = new QuorumPeerConfig();\n                quorumPeerConfig.parseProperties(properties);\n \n                zkServer = new ZooKeeperServerMain();\n                ServerConfig configuration = new ServerConfig();\n                configuration.readFrom(quorumPeerConfig);\n \n                zkServer.runFromConfig(configuration);\n            } catch (Exception e) {\n                if (errorHandler != null) {\n                    errorHandler.handleError(e);\n                } else {\n                    logger.error(\"Exception running embedded ZooKeeper\", e);\n                }\n            }\n        }\n    }\n \n}\n\n
\n
package org.test;\n \nimport org.apache.dubbo.config.spring.context.annotation.DubboComponentScan;\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.context.annotation.ComponentScan;\nimport org.springframework.transaction.annotation.EnableTransactionManagement;\n \n/**\n * \n * @author cjb\n * @date 2019/10/24\n */\n@EnableTransactionManagement\n@ComponentScan(basePackages = {\"org.test.config\", \"org.test.service.impl\"})\n@DubboComponentScan(basePackages = \"org.test.service.impl\")\n@SpringBootApplication\npublic class ProviderApplication {\n \n    public static void main(String[] args) {\n        new EmbeddedZooKeeper(2181, false).start();\n        SpringApplication app = new SpringApplication(ProviderApplication.class);\n        app.run(args);\n    }\n \n}\n\n
\n
创建实体包 org.test.entity以及创建实体类Test 用到了lombok,详细百度一下,eclipse装lombok插件\n
\n
package org.test.entity;\n \nimport java.io.Serializable;\nimport java.time.LocalDateTime;\n \nimport com.baomidou.mybatisplus.annotation.IdType;\nimport com.baomidou.mybatisplus.annotation.TableField;\nimport com.baomidou.mybatisplus.annotation.TableId;\n \nimport io.swagger.annotations.ApiModel;\nimport io.swagger.annotations.ApiModelProperty;\nimport lombok.Data;\nimport lombok.EqualsAndHashCode;\nimport lombok.experimental.Accessors;\n \n/**\n * <p>\n * 功能\n * </p>\n *\n * @author Funkye\n * @since 2019-04-23\n */\n@Data\n@EqualsAndHashCode(callSuper = false)\n@Accessors(chain = true)\n@ApiModel(value = \"test对象\", description = \"功能\")\npublic class Test implements Serializable {\n \n    private static final long serialVersionUID = 1L;\n \n    @ApiModelProperty(value = \"主键\")\n    @TableId(value = \"id\", type = IdType.AUTO)\n    private Integer id;\n \n    @ApiModelProperty(value = \"one\")\n    @TableField(\"one\")\n    private String one;\n \n    @ApiModelProperty(value = \"two\")\n    @TableField(\"two\")\n    private String two;\n \n    @ApiModelProperty(value = \"createTime\")\n    @TableField(\"createTime\")\n    private LocalDateTime createTime;\n \n}\n\n
\n

​\t创建service,service.impl,mapper等包,依次创建ITestservice,以及实现类,mapper

\n
package org.test.service;\n \nimport org.test.entity.Test;\n \nimport com.baomidou.mybatisplus.extension.service.IService; \n \n/**\n * <p>\n * 功能 服务类\n * </p>\n *\n * @author Funkye\n * @since 2019-04-10\n */\npublic interface ITestService extends IService<Test> {\n \n}\n\n
\n
package org.test.service.impl;\n \n \n \n \nimport org.apache.dubbo.config.annotation.Service;\nimport org.test.entity.Test;\nimport org.test.mapper.TestMapper;\nimport org.test.service.ITestService;\n \nimport com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;\n \n@Service(version = \"1.0.0\",interfaceClass =ITestService.class )\npublic class TestServiceImpl extends ServiceImpl<TestMapper, Test> implements ITestService {\n \n}\n\n
\n
package org.test.mapper;\n \nimport org.test.entity.Test; \n \nimport com.baomidou.mybatisplus.core.mapper.BaseMapper;\n \n/**\n * <p>\n * 功能 Mapper 接口\n * </p>\n *\n * @author Funkye\n * @since 2019-04-10\n */\npublic interface TestMapper extends BaseMapper<Test> {\n \n}\n\n
\n
创建org.test.config包,创建SeataAutoConfig.java,配置信息都在此处,主要作用为代理数据,连接事务服务分组 \n
\n
package org.test.config;\n\nimport javax.sql.DataSource;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.beans.factory.annotation.Qualifier;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.annotation.Primary;\n\nimport com.alibaba.druid.pool.DruidDataSource;\n\nimport io.seata.rm.datasource.DataSourceProxy;\nimport io.seata.spring.annotation.GlobalTransactionScanner;\n\n@Configuration\npublic class SeataAutoConfig {\n\t@Autowired(required = true)\n\tprivate DataSourceProperties dataSourceProperties;\n\tprivate final static Logger logger = LoggerFactory.getLogger(SeataAutoConfig.class);\n\n\t@Bean(name = \"druidDataSource\") // 声明其为Bean实例\n\tpublic DataSource druidDataSource() {\n\t\tDruidDataSource druidDataSource = new DruidDataSource();\n\t\tlogger.info(\"dataSourceProperties.getUrl():{}\", dataSourceProperties.getUrl());\n\t\tdruidDataSource.setUrl(dataSourceProperties.getUrl());\n\t\tdruidDataSource.setUsername(dataSourceProperties.getUsername());\n\t\tdruidDataSource.setPassword(dataSourceProperties.getPassword());\n\t\tdruidDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());\n\t\tdruidDataSource.setInitialSize(0);\n\t\tdruidDataSource.setMaxActive(180);\n\t\tdruidDataSource.setMaxWait(60000);\n\t\tdruidDataSource.setMinIdle(0);\n\t\tdruidDataSource.setValidationQuery(\"Select 1 from DUAL\");\n\t\tdruidDataSource.setTestOnBorrow(false);\n\t\tdruidDataSource.setTestOnReturn(false);\n\t\tdruidDataSource.setTestWhileIdle(true);\n\t\tdruidDataSource.setTimeBetweenEvictionRunsMillis(60000);\n\t\tdruidDataSource.setMinEvictableIdleTimeMillis(25200000);\n\t\tdruidDataSource.setRemoveAbandoned(true);\n\t\tdruidDataSource.setRemoveAbandonedTimeout(1800);\n\t\tdruidDataSource.setLogAbandoned(true);\n\t\tlogger.info(\"装载dataSource........\");\n\t\treturn druidDataSource;\n\t}\n\n\t/**\n\t * init datasource proxy\n\t * \n\t * @Param: druidDataSource datasource bean instance\n\t * @Return: DataSourceProxy datasource proxy\n\t */\n\t@Bean(name = \"dataSource\")\n\t@Primary // 在同样的DataSource中,首先使用被标注的DataSource\n\tpublic DataSourceProxy dataSourceProxy(@Qualifier(value = \"druidDataSource\") DruidDataSource druidDataSource) {\n\t\tlogger.info(\"代理dataSource........\");\n\t\treturn new DataSourceProxy(druidDataSource);\n\t}\n\n\t/**\n\t * init global transaction scanner\n\t *\n\t * @Return: GlobalTransactionScanner\n\t */\n\t@Bean\n\tpublic GlobalTransactionScanner globalTransactionScanner() {\n\t\tlogger.info(\"配置seata........\");\n\t\treturn new GlobalTransactionScanner(\"test-service\", \"test-group\");\n\t}\n}\n
\n
再创建mybatisplus所需的配置文件MybatisPlusConfig  \n
\n
package org.test.config;\n \nimport java.util.ArrayList;\nimport java.util.List;\n \nimport org.mybatis.spring.mapper.MapperScannerConfigurer;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\n \nimport com.baomidou.mybatisplus.core.parser.ISqlParser;\nimport com.baomidou.mybatisplus.extension.parsers.BlockAttackSqlParser;\nimport com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;\n \n@Configuration\n// @MapperScan(\"com.baomidou.springboot.mapper*\")//这个注解,作用相当于下面的@Bean\n// MapperScannerConfigurer,2者配置1份即可\npublic class MybatisPlusConfig {\n \n    /**\n     * mybatis-plus分页插件<br>\n     * 文档:http://mp.baomidou.com<br>\n     */\n    @Bean\n    public PaginationInterceptor paginationInterceptor() {\n        PaginationInterceptor paginationInterceptor = new PaginationInterceptor();\n        List<ISqlParser> sqlParserList = new ArrayList<ISqlParser>();\n        // 攻击 SQL 阻断解析器、加入解析链\n        sqlParserList.add(new BlockAttackSqlParser());\n        paginationInterceptor.setSqlParserList(sqlParserList);\n        return paginationInterceptor;\n    }\n \n    /**\n     * 相当于顶部的: {@code @MapperScan(\"com.baomidou.springboot.mapper*\")} 这里可以扩展,比如使用配置文件来配置扫描Mapper的路径\n     */\n \n    @Bean\n    public MapperScannerConfigurer mapperScannerConfigurer() {\n        MapperScannerConfigurer scannerConfigurer = new MapperScannerConfigurer();\n        scannerConfigurer.setBasePackage(\"org.test.mapper\");\n        return scannerConfigurer;\n    }\n \n}\n\n
\n

​\t 再创建resources目录,创建mapper文件夹,application.yml等文件

\n
server:\n  port: 38888\nspring:\n  application: \n      name: test-service\n  datasource:\n    type: com.alibaba.druid.pool.DruidDataSource\n    url: jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC\n    driver-class-name: com.mysql.cj.jdbc.Driver\n    username: root\n    password: 123456\ndubbo:\n  protocol:\n    loadbalance: leastactive\n    threadpool: cached\n  scan:\n    base-packages: org。test.service\n  application:\n    qos-enable: false\n    name: testserver\n  registry:\n    id: my-registry\n    address:  zookeeper://127.0.0.1:2181?client=curator\nmybatis-plus:\n  mapper-locations: classpath:/mapper/*Mapper.xml\n  typeAliasesPackage: org.test.entity\n  global-config:\n    db-config:\n      field-strategy: not-empty\n      id-type: auto\n      db-type: mysql\n  configuration:\n    map-underscore-to-camel-case: true\n    cache-enabled: true      \n    auto-mapping-unknown-column-behavior: none\n\n
\n

​\t 创建file.conf,此处的service 内的vgroup_mapping.你的事务分组,比如上面SeataAutoConfig内配置了test-group,那么这里也要改为test-group,然后下面ip端口都是seata运行的ip跟端口就行了

\n
transport {\n  type = \"TCP\"\n  server = \"NIO\"\n  heartbeat = true\n  thread-factory {\n    boss-thread-prefix = \"NettyBoss\"\n    worker-thread-prefix = \"NettyServerNIOWorker\"\n    server-executor-thread-prefix = \"NettyServerBizHandler\"\n    share-boss-worker = false\n    client-selector-thread-prefix = \"NettyClientSelector\"\n    client-selector-thread-size = 1\n    client-worker-thread-prefix = \"NettyClientWorkerThread\"\n    boss-thread-size = 1\n    worker-thread-size = 8\n  }\n  shutdown {\n    wait = 3\n  }\n  serialization = \"seata\"\n  compressor = \"none\"\n}\nservice {\n  vgroup_mapping.test-group = \"default\"\n  default.grouplist = \"127.0.0.1:8091\"\n  enableDegrade = false\n  disable = false\n  max.commit.retry.timeout = \"-1\"\n  max.rollback.retry.timeout = \"-1\"\n}\n \nclient {\n  async.commit.buffer.limit = 10000\n  lock {\n    retry.internal = 10\n    retry.times = 30\n  }\n  report.retry.count = 5\n  tm.commit.retry.count = 1\n  tm.rollback.retry.count = 1\n  undo.log.table = \"undo_log\"\n}\n \nrecovery {\n  committing-retry-period = 1000\n  asyn-committing-retry-period = 1000\n  rollbacking-retry-period = 1000\n  timeout-retry-period = 1000\n}\n \ntransaction {\n  undo.data.validation = true\n  undo.log.serialization = \"jackson\"\n  undo.log.save.days = 7\n  undo.log.delete.period = 86400000\n  undo.log.table = \"undo_log\"\n}\n \nmetrics {\n  enabled = false\n  registry-type = \"compact\"\n  exporter-list = \"prometheus\"\n  exporter-prometheus-port = 9898\n}\n \nsupport {\n  spring {\n    datasource.autoproxy = false\n  }\n}\n\n
\n

创建registry.conf,来指定file,zk的ip端口之类的配置

\n
registry {\n  type = \"file\"\n  file {\n    name = \"file.conf\"\n  }\n}\nconfig {\n  type = \"file\"\n  file {\n    name = \"file.conf\"\n  }\n  zk {\n    serverAddr = \"127.0.0.1:2181\"\n    session.timeout = 6000\n    connect.timeout = 2000\n  }\n}\n\n
\n

​\t 大功告成,可以直接运行啦,这时候观察seata-server\"20191129142115\"

\n

​\t接下来我们创建test-client项目项目,这里就不赘述了,跟上面的test-service一样的创建方式

\n

​\t接下来我们复制test-service内的service跟实体过去,当然你嫌麻烦,可以单独搞个子项目放通用的service跟实体,一些工具类等等,我这边为了快速搭建这个demo,就选择复制黏贴的方式了.

\n

目录结构:\"\"

\n
然后我们创建ClientApplication:\n
\n
package org.test;\n \nimport java.util.TimeZone;\nimport java.util.concurrent.Executor;\n \nimport org.apache.dubbo.config.spring.context.annotation.EnableDubbo;\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.ComponentScan;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.scheduling.annotation.EnableAsync;\nimport org.springframework.scheduling.annotation.EnableScheduling;\nimport org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;\n \nimport com.baomidou.mybatisplus.autoconfigure.MybatisPlusAutoConfiguration;\n \n@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class, MybatisPlusAutoConfiguration.class})\n@EnableScheduling\n@EnableAsync\n@Configuration\n@EnableDubbo(scanBasePackages = {\"org.test.service\"})\n@ComponentScan(basePackages = {\"org.test.service\", \"org.test.controller\", \"org.test.config\"})\npublic class ClientApplication {\n    public static void main(String[] args) {\n        TimeZone.setDefault(TimeZone.getTimeZone(\"Asia/Shanghai\"));\n        SpringApplication app = new SpringApplication(ClientApplication.class);\n        app.run(args);\n    }\n \n    @Bean(name = \"threadPoolTaskExecutor\")\n    public Executor threadPoolTaskExecutor() {\n        return new ThreadPoolTaskExecutor();\n    }\n}\n\n
\n

再到config包内创建SwaggerConfig :

\n
package org.test.config;\n \nimport java.util.ArrayList;\nimport java.util.List;\n \nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\n \nimport springfox.documentation.builders.ApiInfoBuilder;\nimport springfox.documentation.builders.PathSelectors;\nimport springfox.documentation.builders.RequestHandlerSelectors;\nimport springfox.documentation.service.ApiInfo;\nimport springfox.documentation.service.Contact;\nimport springfox.documentation.service.Parameter;\nimport springfox.documentation.spi.DocumentationType;\nimport springfox.documentation.spring.web.plugins.Docket;\nimport springfox.documentation.swagger2.annotations.EnableSwagger2;\n \n@Configuration\n@EnableSwagger2\npublic class SwaggerConfig {\n    // swagger2的配置文件,这里可以配置swagger2的一些基本的内容,比如扫描的包等等\n    @Bean\n    public Docket createRestApi() {\n        List<Parameter> pars = new ArrayList<Parameter>();\n        return new Docket(DocumentationType.SWAGGER_2).apiInfo(apiInfo()).select()\n            // 为当前包路径\n            .apis(RequestHandlerSelectors.basePackage(\"org.test.controller\")).paths(PathSelectors.any()).build()\n            .globalOperationParameters(pars);\n    }\n \n    // 构建 api文档的详细信息函数,注意这里的注解引用的是哪个\n    private ApiInfo apiInfo() {\n        return new ApiInfoBuilder()\n            // 页面标题\n            .title(\"项目接口\")\n            // 创建人\n            .contact(new Contact(\"FUNKYE\", \"\", \"\"))\n            // 版本号\n            .version(\"1.0\")\n            // 描述\n            .description(\"API 描述\").build();\n    }\n}\n\n
\n

​\t再创建SpringMvcConfigure,再里面放入seata的配置,我为了偷懒直接集成在mvc配置的类里了,大家规范点可以另外创建个配置seata的类,大家可以发现下面还是有个组名称,我把两个项目都分配到一个组去,貌似另外取一个也没事的.

\n
package org.test.config;\n \nimport java.nio.charset.Charset; \nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\n \nimport org.apache.dubbo.config.annotation.Reference;\nimport org.springframework.boot.web.servlet.FilterRegistrationBean;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.core.Ordered;\nimport org.springframework.http.MediaType;\nimport org.springframework.http.converter.HttpMessageConverter;\nimport org.springframework.http.converter.StringHttpMessageConverter;\nimport org.springframework.web.cors.CorsConfiguration;\nimport org.springframework.web.cors.UrlBasedCorsConfigurationSource;\nimport org.springframework.web.filter.CorsFilter;\nimport org.springframework.web.servlet.HandlerInterceptor;\nimport org.springframework.web.servlet.config.annotation.InterceptorRegistry;\nimport org.springframework.web.servlet.config.annotation.WebMvcConfigurer;\nimport org.springframework.web.servlet.view.InternalResourceViewResolver;\n \nimport com.alibaba.fastjson.serializer.SerializerFeature;\nimport com.alibaba.fastjson.support.config.FastJsonConfig;\nimport com.alibaba.fastjson.support.spring.FastJsonHttpMessageConverter;\nimport com.google.common.collect.Maps;\n \nimport io.seata.spring.annotation.GlobalTransactionScanner;\n \n@Configuration\npublic class SpringMvcConfigure implements WebMvcConfigurer {\n \n    @Bean\n    public FilterRegistrationBean corsFilter() {\n        UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();\n        CorsConfiguration config = new CorsConfiguration();\n        config.setAllowCredentials(true);\n        config.addAllowedOrigin(\"*\");\n        config.addAllowedHeader(CorsConfiguration.ALL);\n        config.addAllowedMethod(CorsConfiguration.ALL);\n        source.registerCorsConfiguration(\"/**\", config);\n        FilterRegistrationBean filterRegistrationBean = new FilterRegistrationBean(new CorsFilter(source));\n        filterRegistrationBean.setOrder(Ordered.HIGHEST_PRECEDENCE);\n        filterRegistrationBean.setOrder(1);\n        filterRegistrationBean.setEnabled(true);\n        filterRegistrationBean.addUrlPatterns(\"/**\");\n        Map<String, String> initParameters = Maps.newHashMap();\n        initParameters.put(\"excludes\", \"/favicon.ico,/img/*,/js/*,/css/*\");\n        initParameters.put(\"isIncludeRichText\", \"true\");\n        filterRegistrationBean.setInitParameters(initParameters);\n        return filterRegistrationBean;\n    }\n \n    @Bean\n    public InternalResourceViewResolver viewResolver() {\n        InternalResourceViewResolver viewResolver = new InternalResourceViewResolver();\n        viewResolver.setPrefix(\"/WEB-INF/jsp/\");\n        viewResolver.setSuffix(\".jsp\");\n        // viewResolver.setViewClass(JstlView.class);\n        // 这个属性通常并不需要手动配置,高版本的Spring会自动检测\n        return viewResolver;\n    }\n \n \n \n    /**\n     * 替换框架json为fastjson\n     */\n    @Override\n    public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {\n        FastJsonHttpMessageConverter fastConverter = new FastJsonHttpMessageConverter();\n        FastJsonConfig fastJsonConfig = new FastJsonConfig();\n        fastJsonConfig.setSerializerFeatures(SerializerFeature.PrettyFormat, SerializerFeature.WriteMapNullValue,\n            SerializerFeature.WriteNullStringAsEmpty, SerializerFeature.DisableCircularReferenceDetect);\n        // 处理中文乱码问题\n        List<MediaType> 
fastMediaTypes = new ArrayList<>();\n        fastMediaTypes.add(MediaType.APPLICATION_JSON_UTF8);\n        fastConverter.setSupportedMediaTypes(fastMediaTypes);\n        fastConverter.setFastJsonConfig(fastJsonConfig);\n        // 处理字符串, 避免直接返回字符串的时候被添加了引号\n        StringHttpMessageConverter smc = new StringHttpMessageConverter(Charset.forName(\"UTF-8\"));\n        converters.add(smc);\n        converters.add(fastConverter);\n    }\n \n    @Bean\n    public GlobalTransactionScanner globalTransactionScanner() {\n        return new GlobalTransactionScanner(\"test-client\", \"test-group\");\n    }\n \n}\n\n
\n

再创建controller包,再包下创建TestController :

\n
package org.test.controller;\n \nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.context.annotation.Lazy;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.test.service.DemoService;\n \nimport io.swagger.annotations.Api;\nimport io.swagger.annotations.ApiOperation;\n \n/**\n * <p>\n * 文件表 前端控制器\n * </p>\n *\n * @author funkye\n * @since 2019-03-20\n */\n@RestController\n@RequestMapping(\"/test\")\n@Api(tags = \"测试接口\")\npublic class TestController {\n \n    private final static Logger logger = LoggerFactory.getLogger(TestController.class);\n    @Autowired\n    @Lazy\n    DemoService demoService;\n \n    @GetMapping(value = \"testSeataOne\")\n    @ApiOperation(value = \"测试手动回滚分布式事务接口\")\n    public Object testSeataOne() {\n        return demoService.One();\n    }\n \n    @GetMapping(value = \"testSeataTwo\")\n    @ApiOperation(value = \"测试异常回滚分布式事务接口\")\n    public Object testSeataTwo() {\n        return demoService.Two();\n    }\n \n}\n\n
\n

再到service去创建需要依赖的DemoService

\n
package org.test.service;\n \nimport java.time.LocalDateTime;\n \nimport org.apache.dubbo.config.annotation.Reference;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.stereotype.Service;\nimport org.test.controller.TestController;\nimport org.test.entity.Test;\n \nimport io.seata.core.context.RootContext;\nimport io.seata.core.exception.TransactionException;\nimport io.seata.spring.annotation.GlobalTransactional;\nimport io.seata.tm.api.GlobalTransactionContext;\n \n@Service\npublic class DemoService {\n\t@Reference(version = \"1.0.0\", timeout = 60000)\n\tprivate ITestService testService;\n\tprivate final static Logger logger = LoggerFactory.getLogger(DemoService.class);\n \n\t/**\n\t * 手动回滚示例\n\t * \n\t * @return\n\t */\n\t@GlobalTransactional\n\tpublic Object One() {\n\t\tlogger.info(\"seata分布式事务Id:{}\", RootContext.getXID());\n\t\tTest t = new Test();\n\t\tt.setOne(\"1\");\n\t\tt.setTwo(\"2\");\n\t\tt.setCreateTime(LocalDateTime.now());\n\t\ttestService.save(t);\n\t\ttry {\n\t\t\tint i = 1 / 0;\n\t\t\treturn true;\n\t\t} catch (Exception e) {\n\t\t\t// TODO: handle exception\n\t\t\ttry {\n\t\t\t\tlogger.info(\"载入事务id进行回滚\");\n\t\t\t\tGlobalTransactionContext.reload(RootContext.getXID()).rollback();\n\t\t\t} catch (TransactionException e1) {\n\t\t\t\t// TODO Auto-generated catch block\n\t\t\t\te1.printStackTrace();\n\t\t\t}\n\t\t}\n\t\treturn false;\n\t}\n \n\t/**\n\t * 抛出异常进行回滚示例\n\t * \n\t * @return\n\t */\n\t@GlobalTransactional\n\tpublic Object Two() {\n\t\tlogger.info(\"seata分布式事务Id:{}\", RootContext.getXID());\n\t\tTest t = new Test();\n\t\tt.setOne(\"1\");\n\t\tt.setTwo(\"2\");\n\t\tt.setCreateTime(LocalDateTime.now());\n\t\ttestService.save(t);\n\t\ttry {\n\t\t\tint i = 1 / 0;\n\t\t\treturn true;\n\t\t} catch (Exception e) {\n\t\t\t// TODO: handle exception\n\t\t\tthrow new RuntimeException();\n\t\t}\n\t}\n}\n\n
\n

一样创建resources文件夹,先创建常用的application.yml

\n
spring:\n  application:\n     name: test\n  datasource:\n     driver-class-name: com.mysql.cj.jdbc.Driver\n     url: jdbc:mysql://127.0.0.1:3306/test?userSSL=true&useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai\n     username: root\n     password: 123456\n  mvc:\n    servlet:\n      load-on-startup: 1\n  http:\n    encoding:\n            force: true\n            charset: utf-8\n            enabled: true\n    multipart:\n      max-file-size: 10MB\n      max-request-size: 10MB\ndubbo:\n  registry:\n    id: my-registry\n    address:  zookeeper://127.0.0.1:2181?client=curator\n#    address:  zookeeper://127.0.0.1:2181?client=curator\n  application:\n    name: dubbo-demo-client\n    qos-enable: false\nserver:\n  port: 28888\n  max-http-header-size: 8192\n  address: 0.0.0.0\n  tomcat:\n    max-http-post-size: 104857600\n\n
\n

再把之前service配置好的file跟registry文件复制来,如果你的client组名称再配置类里修改了,那么这里的file文件内的组名称一样需要更改.

\n

\"\"

\n

完整的目录结构如上,这时候可以启动test-service后,再启动test-client,到swagger里测试咯

\n

​\t4.访问127.0.0.1:28888/swagger-ui.html做最后的收尾\t\t\"\"

\n

\"20191129143124\"

\n

这里数据我已经存了一条记录了,我们看看会不会成功回滚:

\n

\"20191129143252\"

\n

刷新数据库,发现还是只有一条数据:

\n

\"20191129143124\"

\n

再查看日志:

\n

\"20191129143407\"

\n

显示已经回滚,我们再看看seata-server的日志:

\n\n

显示回滚成功,事务id也是一致的,这下我们的分布式事务就跑通咯,通过打断点方式,大家可以查看undo_log,会发现再事务提交前,会存入一条事务信息的数据,如果回滚成功,该信息就会被删除.

\n

总结

\n

seata的整合还是比较简单易入手,稍微用心一些你肯定写的比我更好!

\n

欢迎大家也多去阅读seata,dubbo之类的源代码,能解决业务中遇到的大量的坑哦!

\n", - "link": "/zh-cn/blog/springboot-dubbo-mybatisplus-seata.html", - "meta": { - "title": "SpringBoot+Dubbo+MybatisPlus整合seata分布式事务", - "keywords": "Seata,dubbo,mybatis,分布式事务", - "description": "本文讲述如何将springboot+dubbo+mybatisplus整合seata直连方式搭建", - "author": "FUNKYE", - "date": "2019/11/29" - } -} \ No newline at end of file diff --git a/zh-cn/blog/tcc-mode-applicable-scenario-analysis.html b/zh-cn/blog/tcc-mode-applicable-scenario-analysis.html deleted file mode 100644 index faa1d469..00000000 --- a/zh-cn/blog/tcc-mode-applicable-scenario-analysis.html +++ /dev/null @@ -1,119 +0,0 @@ - - - - - - - - - - TCC适用模型与适用场景分析 - - - - -

An Analysis of TCC: Applicable Models and Scenarios

-

Fescar 0.4.0 ships the TCC mode, contributed by the Ant Financial team; everyone is welcome to try it out. A Roadmap for the rest of the project is provided at the end of this article; stay tuned.

-

Preface: Application Scenarios Based on the TCC Model

-


(figure: 1.png)

-

The TCC distributed transaction model operates directly at the service layer. It is not coupled to any particular service framework, is independent of the underlying RPC protocol and of the underlying storage medium, and lets you flexibly choose the locking granularity of business resources, shortening the time resource locks are held; it scales well and can fairly be said to have been designed for independently deployed SOA services.

-

-

1. Advantages of the TCC Model

-

For the TCC distributed transaction model, the author sees two kinds of value in its application to business scenarios.

-

-

1.1 Distributed Transactions Across Services

-

Splitting up services can also be viewed as horizontal scaling of resources, just along a different direction.

-

Horizontal scaling can proceed along two directions:

  1. Functional scaling: group the data by function and distribute the different functional groups across multiple databases. This is essentially the move to services under an SOA architecture.
  2. Data sharding: within a functional group, split the data across multiple databases, adding a new dimension to horizontal scaling.
-

The figure below briefly illustrates the horizontal data-scaling strategy:

-

(figure: 2.png)

-

Therefore, one role of TCC is to guarantee the transactional properties of multi-resource access while resources are scaled out by function.

-

-

1.2 Splitting the Two Phases

-

The other role of TCC is to split the two phases into two independent stages that are tied together by business-level locking of resources. The benefit of business-level locking is that it neither blocks other transactions from continuing to use the same resources during the first phase, nor affects the correct execution of this transaction's second phase.

-

Concurrent transactions under the traditional model:
(figure: 3.png)

-

Concurrent transactions under the TCC model:
(figure: 4.png)

-

What does this buy the business? Take Alipay's secured (escrow) transaction scenario: in simplified form only two services are involved, the trade service and the accounting service. The trade service acts as the primary business service and the accounting service as the secondary business service, providing Try, Commit and Cancel interfaces (a code sketch follows the list):

  1. The Try interface deducts the user's available funds and moves them into pre-frozen funds. Pre-frozen funds are the business-locking scheme: in the second phase each transaction may only spend its own pre-frozen funds, and once the first phase has finished, other concurrent transactions can keep operating on the user's available funds.
  2. The Commit interface deducts the pre-frozen funds and credits the intermediate account's available funds (in an escrow trade the money cannot be paid to the merchant immediately, so an intermediate account is needed to hold it temporarily).
-
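To make the Try / Commit (Confirm) / Cancel contract concrete, here is a minimal sketch of what such a secondary business service could look like. It is modeled on the TCC annotations in the io.seata packages used by the newer demos on this site (under Fescar 0.4.0 the package names differ); the interface name, method names and parameters are illustrative assumptions, not code from this article.

package org.example.tcc;

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

/**
 * Hypothetical TCC participant for the escrow example: the accounting service.
 * Phase one locks the funds; phase two either confirms or releases them.
 */
public interface AccountTccAction {

    /**
     * Try: move "amount" from the user's available balance into pre-frozen funds.
     * The framework records this branch and later drives commit or rollback.
     */
    @TwoPhaseBusinessAction(name = "accountTccAction", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext context,
                    @BusinessActionContextParameter(paramName = "userId") String userId,
                    @BusinessActionContextParameter(paramName = "amount") long amount);

    /**
     * Confirm: deduct the pre-frozen funds and credit the intermediate account.
     * Must be idempotent, because the framework may retry it.
     */
    boolean commit(BusinessActionContext context);

    /**
     * Cancel: release the pre-frozen funds back to the available balance.
     * Must also be idempotent and tolerate an "empty rollback" (Try never ran).
     */
    boolean rollback(BusinessActionContext context);
}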

Now suppose there is only one intermediate account. Every call to the payment service's Commit interface then locks the intermediate account, so the intermediate account has a hot-spot performance problem. In the escrow scenario, however, the funds only need to be transferred from the intermediate account to the merchant seven days later, and the intermediate account is never shown externally. So once the first phase of the payment service has finished, the payment step of this trade can already be considered complete and a success result can be returned to the user and the merchant; there is no need to call the payment service's second-phase Commit interface right away. It can be digested gradually and executed asynchronously during off-peak hours.
(figure: 5.png)

-

This is the second-phase asynchronization capability of the TCC distributed transaction model: as soon as the first phase of the secondary business services has succeeded, the primary business service can commit and finish, and the framework then executes the second phase of each secondary business service asynchronously.

-

-

2. The General-Purpose TCC Solution

-

The general-purpose TCC solution is the most typical implementation of the TCC distributed transaction model: all secondary business services take part in the decision of the primary business service.
(figure: 6.png)
-

-

Applicable Scenarios

-

Because the secondary business services are called synchronously and their results affect the decision of the primary business service, the general-purpose TCC solution suits business whose execution time is deterministic and short, for example the three core services of an internet finance company: trade, payment and accounting:
(figure: 7.png)

When a user initiates a trade, the trade service is accessed first to create a trade order; the trade service then calls the payment service to create a payment order for that trade and collect the money; finally the payment service calls the accounting service to record the account flow and do the bookkeeping.

-

To guarantee that the three services complete a trade together, either all succeeding or all failing, the general-purpose TCC solution can be used to put the three services into one distributed transaction: trade is the primary business service, payment is a secondary business service, and accounting is a nested secondary service of the payment service; the TCC model then guarantees the atomicity of the transaction.
(figure: 8.png)

-

The payment service's Try interface creates the payment order, opens a nested distributed transaction and calls the accounting service's Try interface; the accounting service freezes the buyer's funds inside its Try interface. Once the first-phase calls have completed, the trade is done and the local transaction is committed; the TCC framework then completes the second-phase calls of every secondary business service in the distributed transaction.

-

In the second phase, the payment service first calls the accounting service's Confirm interface, which deducts the buyer's frozen funds and credits the seller's available funds. When that call succeeds, the payment service marks the payment order as completed, finishing the payment.

-

Once the second phases of both the payment and accounting services have been called, the whole distributed transaction ends.
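As a worked illustration of the flow just described, the sketch below shows, under stated assumptions (an in-memory ledger instead of a real database, hypothetical class and field names), how the accounting participant's three operations could move money between "available" and "frozen" buckets.

package org.example.tcc;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Hypothetical in-memory ledger illustrating the Try / Confirm / Cancel bookkeeping
 * described above. A real implementation would persist these balances and the branch
 * state in a database within local transactions.
 */
public class AccountLedger {

    static final class Balances {
        long available;   // funds the user can still spend
        long frozen;      // funds locked by in-flight TCC branches
    }

    private final Map<String, Balances> accounts = new ConcurrentHashMap<>();
    private final Balances intermediate = new Balances(); // escrow account

    /** Try: lock the amount by moving it from available to frozen. */
    public synchronized boolean tryFreeze(String userId, long amount) {
        Balances b = accounts.computeIfAbsent(userId, k -> new Balances());
        if (b.available < amount) {
            return false;               // first-phase failure, so the whole transaction rolls back
        }
        b.available -= amount;
        b.frozen += amount;
        return true;
    }

    /** Confirm: consume the frozen amount and credit the intermediate (escrow) account. */
    public synchronized void confirmDeduct(String userId, long amount) {
        Balances b = accounts.get(userId);
        b.frozen -= amount;
        intermediate.available += amount;
    }

    /** Cancel: release the frozen amount back to the user's available balance. */
    public synchronized void cancelUnfreeze(String userId, long amount) {
        Balances b = accounts.get(userId);
        b.frozen -= amount;
        b.available += amount;
    }
}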

-

-

3. The Asynchronous-Assurance TCC Solution

-

In the asynchronous-assurance TCC solution, the direct secondary business service is a reliable message service, while the real secondary business services are decoupled through it and execute asynchronously as consumers of that message service.
(figure: 9.png)

The reliable message service must provide the three interfaces Try, Confirm and Cancel. Try pre-sends the message and is only responsible for persisting the message data; Confirm confirms the send, at which point the message actually starts to be delivered; Cancel cancels the send and deletes the message data.
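A minimal sketch of such a reliable-message participant, under assumptions: the class, method names and the MessageBroker abstraction are hypothetical and not a real library API.

package org.example.tcc;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Hypothetical reliable-message participant: Try persists the message,
 * Confirm hands it to the broker for delivery, Cancel discards it.
 */
public class ReliableMessageService {

    /** Assumed delivery abstraction (e.g. a message queue client). */
    public interface MessageBroker {
        void deliver(String topic, String payload);
    }

    private static final class PendingMessage {
        final String topic;
        final String payload;
        PendingMessage(String topic, String payload) {
            this.topic = topic;
            this.payload = payload;
        }
    }

    private final Map<String, PendingMessage> pending = new ConcurrentHashMap<>();
    private final MessageBroker broker;

    public ReliableMessageService(MessageBroker broker) {
        this.broker = broker;
    }

    /** Try: only persist the message; nothing is delivered yet. */
    public void tryPreSend(String messageId, String topic, String payload) {
        pending.put(messageId, new PendingMessage(topic, payload));
    }

    /** Confirm: the primary service committed, so actually deliver. Idempotent. */
    public void confirmSend(String messageId) {
        PendingMessage msg = pending.remove(messageId);
        if (msg != null) {
            broker.deliver(msg.topic, msg.payload);  // consumers (e.g. the email service) act asynchronously
        }
    }

    /** Cancel: the primary service rolled back, so drop the message. Idempotent. */
    public void cancelSend(String messageId) {
        pending.remove(messageId);
    }
}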

-

The message service stores its message data independently and scales independently, which reduces the coupling between the secondary business services and the messaging system; provided the message service is reliable, the distributed transaction achieves eventual consistency.

-

Although this solution adds the maintenance cost of the message service, the secondary business services themselves need no changes at all, because the message service implements the TCC interfaces on their behalf; the cost of adoption is therefore very low.

-

-

Applicable Scenarios

-

Because the secondary business services consume the messages asynchronously, their execution time is uncertain, which may widen the inconsistency window. The asynchronous-assurance TCC solution therefore only suits passive businesses with low sensitivity to how quickly eventual consistency is reached (businesses whose results do not affect the primary business service's decision and that only passively receive that decision), for example a member registration service and an email sending service:
(figure: 10.png)

When a user successfully registers as a member, an email must be sent to tell the user the registration succeeded and prompt them to activate the membership. Two points need attention:

-
  1. If the registration succeeds, the email must be sent to the user.
  2. If the registration fails, the email must not be sent.
-

Therefore this likewise requires the member service and the email service to be atomic: either both execute or neither does. The difference is that the email service is purely a passive business; it does not influence whether the user can register, it only needs to send the email after registration succeeds, and it does not take part in the member service's decision.

-

For this kind of business scenario, the asynchronous-assurance TCC distributed transaction solution can be used, as follows:
(figure: 11.png)

The reliable message service decouples the member and email services. The member service and the message service form the TCC transaction model, which guarantees atomicity; the reliability of the message service then ensures the message is eventually consumed by the email service, bringing the member and email services into the same distributed transaction. At the same time, the email service does not affect the execution of the member service; it only passively receives the send-email request after the member service has succeeded.

-

-

4. The Compensating TCC Solution

-

The compensating TCC solution has a structure similar to the general-purpose TCC solution, and its secondary business services also take part in the decision of the primary business service. The difference is that the former's secondary services only need to provide two interfaces, Do and Compensate, whereas the latter's need three.
(figure: 12.png)

The Do interface directly executes the real, complete business logic; processing finishes and the business result is externally visible. The Compensate operation is used for business compensation: it offsets, fully or partially, the business result of the forward operation, and it must be idempotent.
Compared with the general-purpose solution, the secondary business services of the compensating solution do not need to modify their original business logic; they only add a compensating rollback routine, so the amount of change is small. Note, however, that the complete business logic already runs in the first phase, so effective transaction isolation is impossible; when a rollback is needed, compensation may fail, and an extra exception-handling mechanism, such as manual intervention, is required.

-

-

Applicable Scenarios

-

Because rollback compensation can fail, the compensating TCC solution only suits business with few concurrency conflicts or business that must interact with external parties. Such external business is not passive: its result affects the decision of the primary business service, for example a flight-ticket agent's booking service:
(figure: 13.png)

This ticket service offers multi-leg booking: several flights of an itinerary can be booked at once. For example, going from Beijing to Saint Petersburg requires a first leg from Beijing to Moscow and a second leg from Moscow to Saint Petersburg.

-

When a user books the tickets, they certainly want to book both flights at the same time; booking only one leg is meaningless to the user. So this business service also has an atomicity requirement: if booking the ticket for one flight fails, the other booking must be cancellable.

-

However, from the ticket agent's point of view the airlines are external businesses that expose only a booking interface and a cancellation interface, and pushing the airlines to change is extremely difficult. For this kind of business service, the compensating TCC distributed transaction solution can be used, as follows:
(figure: 14.png)

-

On top of its existing logic, the gateway service adds a Compensate interface, which is responsible for calling the corresponding airline's cancellation interface.

-

When the user initiates a booking request, the ticket service first calls each airline's booking interface through the gateways' Do interfaces. If every flight is booked successfully, the whole distributed transaction simply succeeds; as soon as booking any flight fails, the distributed transaction rolls back and the TCC transaction framework calls each gateway's Compensate interface, which in turn calls the corresponding airline's cancellation interface. In this way the atomicity of multi-leg booking is also guaranteed.
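A minimal sketch of such a gateway, assuming a hypothetical AirlineClient for the external booking and cancellation API; the class and method names are illustrative and not taken from the article.

package org.example.tcc;

/**
 * Hypothetical airline gateway for the compensating TCC model:
 * doBook performs the real booking (externally visible immediately),
 * compensate undoes it by calling the airline's cancellation API.
 */
public class AirlineBookingGateway {

    /** Assumed client for one airline's external API. */
    public interface AirlineClient {
        String book(String flightNo, String passengerId);  // returns a booking reference
        void cancel(String bookingRef);
    }

    private final AirlineClient airline;

    public AirlineBookingGateway(AirlineClient airline) {
        this.airline = airline;
    }

    /** Do: execute the complete forward business logic in phase one. */
    public String doBook(String flightNo, String passengerId) {
        return airline.book(flightNo, passengerId);
    }

    /**
     * Compensate: offset the forward operation. Must be idempotent, because the
     * TCC framework may retry it; compensation failures still need extra handling
     * (e.g. manual intervention), as noted above.
     */
    public void compensate(String bookingRef) {
        if (bookingRef != null) {
            airline.cancel(bookingRef);
        }
    }
}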

-

-

5. Summary

-

For today's internet applications, horizontal scaling of resources offers more flexibility and is a relatively easy way to scale out, but it also clearly adds complexity and introduces new challenges, such as data consistency across resources.

-

Horizontal data scaling can be done either by data sharding or by function. The TCC model can guarantee the transactional properties of multi-resource access while resources are scaled out by function.

-

Besides enabling distributed transactions across services, the TCC model also splits the two phases: through business-level resource locking it allows the second phase to run asynchronously, and asynchronization is precisely one of the sharpest tools for solving concurrency and performance problems on hot data.
  -

-

Roadmap

-

Version 0.4.0 has now been released. We will follow with versions 0.5 through 1.0, continuing to improve and enrich the AT and TCC modes and to solve server-side high availability. After 1.0, this open-source product will be ready for production use.


(figure: 图片1.png)

-
- - - - - - - diff --git a/zh-cn/blog/tcc-mode-applicable-scenario-analysis.json b/zh-cn/blog/tcc-mode-applicable-scenario-analysis.json deleted file mode 100644 index 2780c4d3..00000000 --- a/zh-cn/blog/tcc-mode-applicable-scenario-analysis.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "tcc-mode-applicable-scenario-analysis.md", - "__html": "

TCC 适用模型与适用场景分析

\n

Fescar 0.4.0 版本发布了TCC 模式,由蚂蚁金服团队贡献,欢迎大家试用,文末也提供了项目后续的 Roadmap,欢迎关注。\n

\n

前言:基于 TCC 模型的应用场景

\n


\n\"1.png\"

\n

TCC 分布式事务模型直接作用于服务层。不与具体的服务框架耦合,与底层 RPC 协议无关,与底层存储介质无关,可以灵活选择业务资源的锁定粒度,减少资源锁持有时间,可扩展性好,可以说是为独立部署的 SOA 服务而设计的。

\n

\n

一、TCC 模型优势

\n

对于 TCC 分布式事务模型,笔者认为其在业务场景应用上,有两方面的意义。

\n

\n

1.1 跨服务的分布式事务

\n

服务的拆分,也可以认为是资源的横向扩展,只不过方向不同而已。

\n

横向扩展可能沿着两个方向发展:

\n
    \n
  1. 功能扩展,根据功能对数据进行分组,并将不同的功能组分布在多个不同的数据库上,这实际上就是 SOA 架构下的服务化。
  2. \n
  3. 数据分片,在功能组内部将数据拆分到多个数据库上,为横向扩展增加一个新的维度。
  4. \n
\n

下图简要阐释了横向数据扩展策略:

\n

\"2.png\"

\n

因此,TCC 的其中一个作用就是在按照功能横向扩展资源时,保证多资源访问的事务属性。

\n

\n

1.2 两阶段拆分

\n

TCC 另一个作用就是把两阶段拆分成了两个独立的阶段,通过资源业务锁定的方式进行关联。资源业务锁定方式的好处在于,既不会阻塞其他事务在第一阶段对于相同资源的继续使用,也不会影响本事务第二阶段的正确执行。

\n

传统模型的并发事务:
\n\"3.png\"

\n

TCC 模型的并发事务:
\n\"4.png\"

\n

这对业务有什么好处呢?拿支付宝的担保交易场景来说,简化情况下,只需要涉及两个服务,交易服务和账务服务。交易作为主业务服务,账务作为从业务服务,提供 Try、Commit、Cancel 接口:

\n
    \n
  1. Try 接口扣除用户可用资金,转移到预冻结资金。预冻结资金就是业务锁定方案,每个事务第二阶段只能使用本事务的预冻结资金,在第一阶段执行结束后,其他并发事务也可以继续处理用户的可用资金。
  2. \n
  3. Commit 接口扣除预冻结资金,增加中间账户可用资金(担保交易不能立即把钱打给商户,需要有一个中间账户来暂存)。
  4. \n
\n

假设只有一个中间账户的情况下,每次调用支付服务的 Commit 接口,都会锁定中间账户,中间账户存在热点性能问题。 但是,在担保交易场景中,七天以后才需要将资金从中间账户划拨给商户,中间账户并不需要对外展示。因此,在执行完支付服务的第一阶段后,就可以认为本次交易的支付环节已经完成,并向用户和商户返回支付成功的结果,并不需要马上执行支付服务二阶段的 Commit 接口,等到低锋期时,再慢慢消化,异步地执行。
\n\"5.png\"

\n

这就是 TCC 分布式事务模型的二阶段异步化功能,从业务服务的第一阶段执行成功,主业务服务就可以提交完成,然后再由框架异步的执行各从业务服务的第二阶段。

\n

\n

二、通用型 TCC 解决方案

\n

通用型 TCC 解决方案就是最典型的 TCC 分布式事务模型实现,所有从业务服务都需要参与到主业务服务的决策当中。
\n\"6.png\"
 \n

\n

适用场景

\n

由于从业务服务是同步调用,其结果会影响到主业务服务的决策,因此通用型 TCC 分布式事务解决方案适用于执行时间确定且较短的业务,比如互联网金融企业最核心的三个服务:交易、支付、账务:
\n\"7.png\"
 
当用户发起一笔交易时,首先访问交易服务,创建交易订单;然后交易服务调用支付服务为该交易创建支付订单,执行收款动作,最后支付服务调用账务服务记录账户流水和记账。

\n

为了保证三个服务一起完成一笔交易,要么同时成功,要么同时失败,可以使用通用型 TCC 解决方案,将这三个服务放在一个分布式事务中,交易作为主业务服务,支付作为从业务服务,账务作为支付服务的嵌套从业务服务,由 TCC 模型保证事务的原子性。
\n\"8.png\"

\n

支付服务的 Try 接口创建支付订单,开启嵌套分布式事务,并调用账务服务的 Try 接口;账务服务在 Try 接口中冻结买家资金。一阶段调用完成后,交易完成,提交本地事务,由 TCC 框架完成分布式事务各从业务服务二阶段的调用。

\n

支付服务二阶段先调用账务服务的 Confirm 接口,扣除买家冻结资金;增加卖家可用资金。调用成功后,支付服务修改支付订单为完成状态,完成支付。

\n

当支付和账务服务二阶段都调用完成后,整个分布式事务结束。

\n

\n

三、异步确保型 TCC 解决方案

\n

异步确保型 TCC 解决方案的直接从业务服务是可靠消息服务,而真正的从业务服务则通过消息服务解耦,作为消息服务的消费端,异步地执行。
\n\"9.png\"
 
可靠消息服务需要提供 Try,Confirm,Cancel 三个接口。Try 接口预发送,只负责持久化存储消息数据;Confirm 接口确认发送,这时才开始真正的投递消息;Cancel 接口取消发送,删除消息数据。
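
可靠消息服务的三个接口可以用下面的 Java 片段粗略示意,其中用内存 Map 代替消息的持久化存储,deliver 代表真正的投递动作,均为假设性实现:

// 可靠消息服务 Try / Confirm / Cancel 接口示意(假设性实现)
public class ReliableMessageService {

    // 以内存 Map 代替持久化的消息存储,仅作示意
    private final java.util.concurrent.ConcurrentMap<String, String> messageStore =
            new java.util.concurrent.ConcurrentHashMap<>();

    /** Try:预发送,只持久化消息数据,不投递 */
    public void tryPreSend(String msgId, String payload) {
        messageStore.put(msgId, payload);
    }

    /** Confirm:确认发送,此时才真正把消息投递给消费端(例如邮件服务) */
    public void confirmSend(String msgId) {
        String payload = messageStore.get(msgId);
        if (payload != null) {
            deliver(payload);
        }
    }

    /** Cancel:取消发送,删除已持久化的消息数据 */
    public void cancelSend(String msgId) {
        messageStore.remove(msgId);
    }

    private void deliver(String payload) {
        System.out.println("deliver message: " + payload);
    }
}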

\n

消息服务的消息数据独立存储,独立伸缩,降低从业务服务与消息系统间的耦合,在消息服务可靠的前提下,实现分布式事务的最终一致性。

\n

此解决方案虽然增加了消息服务的维护成本,但由于消息服务代替从业务服务实现了 TCC 接口,从业务服务不需要任何改造,接入成本非常低。

\n

\n

适用场景

\n

由于从业务服务消费消息是一个异步的过程,执行时间不确定,可能会导致不一致时间窗口增加。因此,异步确保型 TCC 分布式事务解决方案只适用于对最终一致性时间敏感度较低的一些被动型业务(从业务服务的处理结果不影响主业务服务的决策,只被动地接收主业务服务的决策结果)。比如会员注册服务和邮件发送服务:
\n\"10.png\"
 
当用户注册会员成功,需要给用户发送一封邮件,告诉用户注册成功,并提示用户激活该会员。但要注意两点:

\n
    \n
  1. 如果用户注册成功,一定要给用户发送一封邮件;
  2. \n
  3. 如果用户注册失败,一定不能给用户发送邮件。
  4. \n
\n

因此,这同样需要会员服务和邮件服务保证原子性,要么都执行,要么都不执行。不一样的是,邮件服务只是一种被动型的业务,并不影响用户是否能够注册成功,它只需要在用户注册成功以后发送邮件给用户即可,邮件服务不需要参与到会员服务的活动决策中。

\n

对于此种业务场景,可以使用异步确保型 TCC 分布式事务解决方案,如下:
\n\"11.png\"
 
 
由可靠消息服务来解耦会员和邮件服务,会员服务与消息服务组成 TCC 事务模型,保证事务原子性。然后通过消息服务的可靠特性,确保消息一定能够被邮件服务消费,从而使得会员与邮件服务在同一个分布式事务中。同时,邮件服务也不会影响会员服务的执行过程,只在会员服务执行成功后被动接收发送邮件的请求。

\n

\n

四、补偿型 TCC 解决方案

\n

补偿型 TCC 解决方案与通用型 TCC 解决方案的结构相似,其从业务服务也需要参与到主业务服务的活动决策当中。但不一样的是,前者的从业务服务只需要提供 Do 和 Compensate 两个接口,而后者需要提供三个接口。
\n\"12.png\"
 
Do 接口直接执行真正的完整业务逻辑,完成业务处理,业务执行结果外部可见;Compensate 操作用于业务补偿,抵消或部分抵消正向业务操作的业务结果,Compensate操作需满足幂等性。
与通用型解决方案相比,补偿型解决方案的从业务服务不需要改造原有业务逻辑,只需要额外增加一个补偿回滚逻辑即可,业务改造量较小。但要注意的是,业务在一阶段就执行完整个业务逻辑,无法做到有效的事务隔离,当需要回滚时,可能存在补偿失败的情况,还需要额外的异常处理机制,比如人工介入。

\n

\n

适用场景

\n

由于存在回滚补偿失败的情况,补偿型 TCC 分布式事务解决方案只适用于一些并发冲突较少或者需要与外部交互的业务,这些外部业务不属于被动型业务,其执行结果会影响主业务服务的决策,比如机票代理商的机票预订服务:
\n\"13.png\"
 
该机票服务提供多程机票预订服务,可以同时预订多趟行程航班机票,比如从北京到圣彼得堡,需要第一程从北京到莫斯科,以及第二程从莫斯科到圣彼得堡。

\n

当用户预订机票时,肯定希望能同时预订这两趟航班的机票,只预订一趟航班对用户来说没有意义。因此,对于这样的业务服务同样提出了原子性要求,如果其中一趟航班的机票预订失败,另外一趟需要能够取消预订。

\n

但是,由于航空公司相对于机票代理商来说属于外部业务,只提供订票接口和取消预订接口,想要推动航空公司改造是极其困难的。因此,对于此类业务服务,可以使用补偿型 TCC 分布式事务解决方案,如下:
\n\"14.png\"

\n

网关服务在原有逻辑基础上增加 Compensate 接口,负责调用对应航空公司的取消预订接口。

\n

在用户发起机票预订请求时,机票服务先通过网关 Do 接口,调用各航空公司的预订接口,如果所有航班都预订成功,则整个分布式事务直接执行成功;一旦某趟航班机票预订失败,则分布式事务回滚,由 TCC 事务框架调用各网关的 Compensate 补偿接口,其再调用对应航空公司的取消预订接口。通过这种方式,也可以保证多程机票预订服务的原子性。

\n

\n

五、总结

\n

对于现在的互联网应用来说,资源横向扩展提供了更多的灵活性,是一种比较容易实现的向外扩展方案,但是同时也明显增加了复杂度,引入一些新的挑战,比如资源之间的数据一致性问题。

\n

横向数据扩展既可以按数据分片扩展,也可以按功能扩展。TCC 模型能在功能横向扩展资源的同时,保证多资源访问的事务属性。

\n

TCC 模型除了跨服务的分布式事务这一层作用之外,还具有两阶段划分的功能,通过业务资源锁定,允许第二阶段的异步执行,而异步化思想正是解决热点数据并发性能问题的利器之一。
 \n

\n

Roadmap

\n

当前已经发布到 0.4.0,后续我们会发布 0.5 ~ 1.0 版本,继续对 AT、TCC 模式进行功能完善和丰富,并解决服务端高可用问题,在 1.0 版本之后,本开源产品将达到生产环境使用的标准。


\"图片1.png\"

\n", - "link": "/zh-cn/blog/tcc-mode-applicable-scenario-analysis.html", - "meta": { - "title": "TCC适用模型与适用场景分析", - "author": "zhangthen", - "date": "2019/03/27", - "keywords": "seata、分布式事务、TCC、roadmap" - } -} \ No newline at end of file diff --git a/zh-cn/blog/tcc-mode-design-principle.html b/zh-cn/blog/tcc-mode-design-principle.html deleted file mode 100644 index 0ce4ed1a..00000000 --- a/zh-cn/blog/tcc-mode-design-principle.html +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - - - - TCC 理论及设计实现指南介绍 - - - - -

TCC 理论及设计实现指南介绍

-

Fescar 0.4.0 版本发布了 TCC 模式,由蚂蚁金服团队贡献,欢迎大家试用,
Sample 地址:https://github.com/fescar-group/fescar-samples/tree/master/tcc
文末也提供了项目后续的 Roadmap,欢迎关注。

-

-

一、TCC 简介

-

在两阶段提交协议(2PC,Two Phase Commitment Protocol)中,资源管理器(RM, resource manager)需要提供“准备”、“提交”和“回滚” 3 个操作;而事务管理器(TM, transaction manager)分 2 阶段协调所有资源管理器,在第一阶段询问所有资源管理器“准备”是否成功,如果所有资源均“准备”成功则在第二阶段执行所有资源的“提交”操作,否则在第二阶段执行所有资源的“回滚”操作,保证所有资源的最终状态是一致的,要么全部提交要么全部回滚。

-

资源管理器有很多实现方式,其中 TCC(Try-Confirm-Cancel)是资源管理器的一种服务化的实现;TCC 是一种比较成熟的分布式事务解决方案,可用于解决跨数据库、跨服务业务操作的数据一致性问题;TCC 的 Try、Confirm、Cancel 3 个方法均由业务编码实现,故 TCC 可以被称为是服务化的资源管理器。

-

TCC 的 Try 操作作为一阶段,负责资源的检查和预留;Confirm 操作作为二阶段提交操作,执行真正的业务;Cancel 是二阶段回滚操作,执行预留资源的取消,使资源回到初始状态。

-

如下图所示,用户实现 TCC 服务之后,该 TCC 服务将作为分布式事务的其中一个资源,参与到整个分布式事务中;事务管理器分 2 阶段协调 TCC 服务,在第一阶段调用所有 TCC 服务的 Try 方法,在第二阶段执行所有 TCC 服务的 Confirm 或者 Cancel 方法;最终所有 TCC 服务要么全部都是提交的,要么全部都是回滚的。

-

image.png

-

-

二、TCC 设计

-

用户在接入 TCC 时,大部分工作都集中在如何实现 TCC 服务上。经过蚂蚁金服多年的 TCC 应用实践,总结出如下 TCC 设计和实现的主要事项:

-

-

1、业务操作分两阶段完成

-

接入 TCC 前,业务操作只需要一步就能完成,但是在接入 TCC 之后,需要考虑如何将其分成 2 阶段完成,把资源的检查和预留放在一阶段的 Try 操作中进行,把真正的业务操作的执行放在二阶段的 Confirm 操作中进行。

-

以下举例说明业务模式如何分成两阶段进行设计,举例场景:“账户A的余额中有 100 元,需要扣除其中 30 元”;

-

在接入 TCC 之前,用户编写 SQL:“update 账户表 set 余额 = 余额 - 30 where 账户 = A”,便能一步完成扣款操作。

-

在接入 TCC 之后,就需要考虑如何将扣款操作分成 2 步完成:

-
    -
  • Try 操作:资源的检查和预留;
  • -
-

在扣款场景,Try 操作要做的事情就是先检查 A 账户余额是否足够,再冻结要扣款的 30 元(预留资源);此阶段不会发生真正的扣款。

-
    -
  • Confirm 操作:执行真正业务的提交;
  • -
-

在扣款场景下,Confirm 阶段做的事情就是执行真正的扣款,把 A 账户中已经冻结的 30 元钱扣掉。

-
    -
  • Cancel 操作:预留资源的释放;
  • -
-

在扣款场景下,扣款取消,Cancel 操作执行的任务是释放 Try 操作冻结的 30 元钱,使 A 账户回到初始状态。

-

image.png
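
把上面的 Try、Confirm、Cancel 三步用 Java 代码串起来,可以得到一个账户扣款 TCC 服务的最小示意;余额与冻结金额用内存字段代替数据库,属于为说明两阶段拆分而做的假设性实现:

// 账户扣款 TCC 服务示意:余额/冻结金额用内存字段代替数据库,仅说明两阶段拆分(假设性实现)
public class AccountTccService {

    private long available = 100;   // 可用余额
    private long frozen = 0;        // 冻结金额

    /** Try:检查余额并冻结 30 元(预留资源),此阶段不发生真正的扣款 */
    public synchronized boolean tryDeduct(long amount) {
        if (available < amount) {
            return false;
        }
        available -= amount;
        frozen += amount;
        return true;
    }

    /** Confirm:执行真正的扣款,把已冻结的金额扣掉 */
    public synchronized void confirm(long amount) {
        frozen -= amount;
    }

    /** Cancel:释放 Try 冻结的金额,使账户回到初始状态 */
    public synchronized void cancel(long amount) {
        frozen -= amount;
        available += amount;
    }
}

真实实现中,余额与冻结金额应持久化在数据库中,并结合后文的并发控制、空回滚、防悬挂与幂等控制一起设计。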

-

-

2、并发控制

-

用户在实现 TCC 时,应当考虑并发性问题,将锁的粒度降到最低,以最大限度的提高分布式事务的并发性。

-

以下还是以A账户扣款为例,“账户 A 上有 100 元,事务 T1 要扣除其中的 30 元,事务 T2 也要扣除 30 元,出现并发”。

-

在一阶段 Try 操作中,分布式事务 T1 和分布式事务 T2 分别冻结各自要扣减的那一部分资金,相互之间互不干扰;这样在分布式事务的二阶段,无论 T1 是提交还是回滚,都不会对 T2 产生影响,T1 和 T2 就可以在同一笔业务数据上并行执行。

-

image.png
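
并发控制的关键是按事务维度记录各自冻结的资金,下面的 Java 示意用 xid 作为键区分 T1、T2 各自冻结的部分,同样是假设性实现:

// 并发控制示意:按事务 xid 分别记录冻结金额,T1/T2 的二阶段互不影响(假设性实现)
public class ConcurrentAccount {

    private long available = 100;
    private final java.util.concurrent.ConcurrentMap<String, Long> frozenByXid =
            new java.util.concurrent.ConcurrentHashMap<>();

    /** Try:各事务只冻结属于自己的那部分资金 */
    public synchronized boolean tryFreeze(String xid, long amount) {
        if (available < amount) {
            return false;
        }
        available -= amount;
        frozenByXid.merge(xid, amount, Long::sum);
        return true;
    }

    /** Confirm:只扣减本事务冻结的资金,不影响其他并发事务 */
    public synchronized void confirm(String xid) {
        frozenByXid.remove(xid);
    }

    /** Cancel:把本事务冻结的资金释放回可用余额 */
    public synchronized void cancel(String xid) {
        Long frozen = frozenByXid.remove(xid);
        if (frozen != null) {
            available += frozen;
        }
    }
}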

-

-

3、允许空回滚

-

如下图所示,事务协调器在调用 TCC 服务的一阶段 Try 操作时,可能会出现因为丢包而导致的网络超时,此时事务管理器会触发二阶段回滚,调用 TCC 服务的 Cancel 操作,而 Cancel 操作调用未出现超时。

-

TCC 服务在未收到 Try 请求的情况下收到 Cancel 请求,这种场景被称为空回滚;空回滚在生产环境经常出现,用户在实现 TCC 服务时,应允许空回滚的执行,即收到空回滚时返回成功。

-

image.png

-

-

4、防悬挂控制

-

如下图所示,事务协调器在调用 TCC 服务的一阶段 Try 操作时,可能会出现因网络拥堵而导致的超时,此时事务管理器会触发二阶段回滚,调用 TCC 服务的 Cancel 操作,Cancel 调用未超时;在此之后,拥堵在网络上的一阶段 Try 数据包被 TCC 服务收到,出现了二阶段 Cancel 请求比一阶段 Try 请求先执行的情况,此 TCC 服务在执行晚到的 Try 之后,将永远不会再收到二阶段的 Confirm 或者 Cancel ,造成 TCC 服务悬挂。

-

用户在实现 TCC 服务时,要允许空回滚,但是要拒绝执行空回滚之后到来的 Try 请求,以避免出现悬挂。

-

image.png
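
空回滚与防悬挂通常借助一张事务控制记录表来实现:Cancel 先落下记录,晚到的 Try 发现记录已存在就拒绝执行。下面用内存集合代替该表,给出一个假设性的 Java 示意:

// 空回滚与防悬挂控制示意:用 xid 集合代替事务控制记录表(假设性实现)
public class TccStatusController {

    private final java.util.Set<String> triedXids =
            java.util.concurrent.ConcurrentHashMap.newKeySet();     // 已执行过 Try 的事务
    private final java.util.Set<String> cancelledXids =
            java.util.concurrent.ConcurrentHashMap.newKeySet();     // 已执行过 Cancel(含空回滚)的事务

    /** Try:若该 xid 已经被 Cancel 过(空回滚在先),直接拒绝,避免悬挂 */
    public boolean tryAction(String xid) {
        if (cancelledXids.contains(xid)) {
            return false;                     // 晚到的 Try,拒绝执行
        }
        triedXids.add(xid);
        return true;
    }

    /** Cancel:未收到过 Try 时即为空回滚,记录后直接返回成功 */
    public boolean cancelAction(String xid) {
        cancelledXids.add(xid);               // 先落下 Cancel 标记,防止后续 Try 悬挂
        if (!triedXids.contains(xid)) {
            return true;                      // 空回滚,返回成功
        }
        // 此处执行真正的资源释放逻辑
        return true;
    }
}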

-

-

5、幂等控制

-

无论是网络数据包重传,还是异常事务的补偿执行,都会导致 TCC 服务的 Try、Confirm 或者 Cancel 操作被重复执行;用户在实现 TCC 服务时,需要考虑幂等控制,即 Try、Confirm、Cancel 执行一次和执行多次的业务结果是一样的。
image.png
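
幂等控制一般通过记录每个 xid 在各阶段的执行结果来实现,重复请求直接复用上次结果,下面是一个极简的假设性示意:

// 幂等控制示意:以 xid+阶段 作为键记录执行结果,重复调用直接复用首次结果(假设性实现)
public class IdempotentExecutor {

    private final java.util.concurrent.ConcurrentMap<String, Boolean> executed =
            new java.util.concurrent.ConcurrentHashMap<>();

    /** 同一 xid 的同一阶段只真正执行一次,重复调用返回首次结果 */
    public boolean executeOnce(String xid, String phase, java.util.function.BooleanSupplier action) {
        return executed.computeIfAbsent(xid + ":" + phase, key -> action.getAsBoolean());
    }
}

例如 Confirm 阶段可以写成 executeOnce(xid, "confirm", () -> doConfirm(xid)) 的形式(doConfirm 为假设的业务方法),网络数据包重传时就不会重复扣款。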

-

-

Roadmap

-

当前已经发布到 0.4.0 版本,后续我们会发布 0.5 ~ 1.0 版本,继续对 AT、TCC 模式进行功能完善和丰富,并解决服务端高可用问题,在 1.0 版本之后,本开源产品将达到生产环境使用的标准。

-

图片1.png

-
- - - - - - - diff --git a/zh-cn/blog/tcc-mode-design-principle.json b/zh-cn/blog/tcc-mode-design-principle.json deleted file mode 100644 index 130a9ec7..00000000 --- a/zh-cn/blog/tcc-mode-design-principle.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "filename": "tcc-mode-design-principle.md", - "__html": "

TCC 理论及设计实现指南介绍

\n

Fescar 0.4.0 版本发布了 TCC 模式,由蚂蚁金服团队贡献,欢迎大家试用,
Sample 地址:https://github.com/fescar-group/fescar-samples/tree/master/tcc
文末也提供了项目后续的 Roadmap,欢迎关注。

\n

\n

一、TCC 简介

\n

在两阶段提交协议(2PC,Two Phase Commitment Protocol)中,资源管理器(RM, resource manager)需要提供“准备”、“提交”和“回滚” 3 个操作;而事务管理器(TM, transaction manager)分 2 阶段协调所有资源管理器,在第一阶段询问所有资源管理器“准备”是否成功,如果所有资源均“准备”成功则在第二阶段执行所有资源的“提交”操作,否则在第二阶段执行所有资源的“回滚”操作,保证所有资源的最终状态是一致的,要么全部提交要么全部回滚。

\n

资源管理器有很多实现方式,其中 TCC(Try-Confirm-Cancel)是资源管理器的一种服务化的实现;TCC 是一种比较成熟的分布式事务解决方案,可用于解决跨数据库、跨服务业务操作的数据一致性问题;TCC 其 Try、Confirm、Cancel 3 个方法均由业务编码实现,故 TCC 可以被称为是服务化的资源管理器。

\n

TCC 的 Try 操作作为一阶段,负责资源的检查和预留;Confirm 操作作为二阶段提交操作,执行真正的业务;Cancel 是二阶段回滚操作,执行预留资源的取消,使资源回到初始状态。

\n

如下图所示,用户实现 TCC 服务之后,该 TCC 服务将作为分布式事务的其中一个资源,参与到整个分布式事务中;事务管理器分 2 阶段协调 TCC 服务,在第一阶段调用所有 TCC 服务的 Try 方法,在第二阶段执行所有 TCC 服务的 Confirm 或者 Cancel 方法;最终所有 TCC 服务要么全部都是提交的,要么全部都是回滚的。

\n

\"image.png\"

\n

\n

二、TCC 设计

\n

用户在接入 TCC 时,大部分工作都集中在如何实现 TCC 服务上。经过蚂蚁金服多年的 TCC 应用实践,总结出如下 TCC 设计和实现的主要事项:

\n

\n

1、业务操作分两阶段完成

\n

接入 TCC 前,业务操作只需要一步就能完成,但是在接入 TCC 之后,需要考虑如何将其分成 2 阶段完成,把资源的检查和预留放在一阶段的 Try 操作中进行,把真正的业务操作的执行放在二阶段的 Confirm 操作中进行。

\n

以下举例说明业务模式如何分成两阶段进行设计,举例场景:“账户A的余额中有 100 元,需要扣除其中 30 元”;

\n

在接入 TCC 之前,用户编写 SQL:“update 账户表 set 余额 = 余额 - 30 where 账户 = A”,便能一步完成扣款操作。

\n

在接入 TCC 之后,就需要考虑如何将扣款操作分成 2 步完成:

\n
    \n
  • Try 操作:资源的检查和预留;
  • \n
\n

在扣款场景,Try 操作要做的事情就是先检查 A 账户余额是否足够,再冻结要扣款的 30 元(预留资源);此阶段不会发生真正的扣款。

\n
    \n
  • Confirm 操作:执行真正业务的提交;
  • \n
\n

在扣款场景下,Confirm 阶段做的事情就是执行真正的扣款,把 A 账户中已经冻结的 30 元钱扣掉。

\n
    \n
  • Cancel 操作:预留资源的释放;
  • \n
\n

在扣款场景下,扣款取消,Cancel 操作执行的任务是释放 Try 操作冻结的 30 元钱,使 A 账户回到初始状态。

\n

\"image.png\"

\n

\n

2、并发控制

\n

用户在实现 TCC 时,应当考虑并发性问题,将锁的粒度降到最低,以最大限度的提高分布式事务的并发性。

\n

以下还是以A账户扣款为例,“账户 A 上有 100 元,事务 T1 要扣除其中的 30 元,事务 T2 也要扣除 30 元,出现并发”。

\n

在一阶段 Try 操作中,分布式事务 T1 和分布式事务 T2 分别冻结资金的那一部分资金,相互之间无干扰;这样在分布式事务的二阶段,无论 T1 是提交还是回滚,都不会对 T2 产生影响,这样 T1 和 T2 在同一笔业务数据上并行执行。

\n

\"image.png\"

\n

\n

3、允许空回滚

\n

如下图所示,事务协调器在调用 TCC 服务的一阶段 Try 操作时,可能会出现因为丢包而导致的网络超时,此时事务管理器会触发二阶段回滚,调用 TCC 服务的 Cancel 操作,而 Cancel 操作调用未出现超时。

\n

TCC 服务在未收到 Try 请求的情况下收到 Cancel 请求,这种场景被称为空回滚;空回滚在生产环境经常出现,用户在实现 TCC 服务时,应允许空回滚的执行,即收到空回滚时返回成功。

\n

\"image.png\"

\n

\n

4、防悬挂控制

\n

如下图所示,事务协调器在调用 TCC 服务的一阶段 Try 操作时,可能会出现因网络拥堵而导致的超时,此时事务管理器会触发二阶段回滚,调用 TCC 服务的 Cancel 操作,Cancel 调用未超时;在此之后,拥堵在网络上的一阶段 Try 数据包被 TCC 服务收到,出现了二阶段 Cancel 请求比一阶段 Try 请求先执行的情况,此 TCC 服务在执行晚到的 Try 之后,将永远不会再收到二阶段的 Confirm 或者 Cancel ,造成 TCC 服务悬挂。

\n

用户在实现 TCC 服务时,要允许空回滚,但是要拒绝执行空回滚之后到来的 Try 请求,以避免出现悬挂。

\n

\"image.png\"

\n

\n

5、幂等控制

\n

无论是网络数据包重传,还是异常事务的补偿执行,都会导致 TCC 服务的 Try、Confirm 或者 Cancel 操作被重复执行;用户在实现 TCC 服务时,需要考虑幂等控制,即 Try、Confirm、Cancel 执行一次和执行多次的业务结果是一样的。
\"image.png\"

\n

\n

Roadmap

\n

当前已经发布到 0.4.0 版本,后续我们会发布 0.5 ~ 1.0 版本,继续对 AT、TCC 模式进行功能完善和丰富,并解决服务端高可用问题,在 1.0 版本之后,本开源产品将达到生产环境使用的标准。

\n

\"图片1.png\"

\n", - "link": "/zh-cn/blog/tcc-mode-design-principle.html", - "meta": { - "title": "TCC 理论及设计实现指南介绍", - "author": "zhangthen", - "date": "2019/03/26", - "keywords": "fescar、分布式事务、TCC、roadmap" - } -} \ No newline at end of file diff --git a/zh-cn/community/index.html b/zh-cn/community/index.html deleted file mode 100644 index 263e5cfc..00000000 --- a/zh-cn/community/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - 社区 - - - - -
社区

事件 & 新闻

seata

seata

May 12th, 2018

联系我们

有问题需要反馈?请通过以下方式联系我们。

贡献指南

欢迎为 Seata 做贡献!

邮件列表

加入我们的邮件列表。

报告问题

提交新问题

改进文档

改进文档

提交 PR

创建一个PR

- - - - - - - diff --git a/zh-cn/docs/community/activity.html b/zh-cn/docs/community/activity.html deleted file mode 100644 index a6274c66..00000000 --- a/zh-cn/docs/community/activity.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - activity - - - - -
文档

有奖活动

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/community/activity.json b/zh-cn/docs/community/activity.json deleted file mode 100644 index dfc8f297..00000000 --- a/zh-cn/docs/community/activity.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "activity.md", - "__html": "

有奖活动

\n

TBD

\n", - "link": "/zh-cn/docs/community/activity.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/community/contact.html b/zh-cn/docs/community/contact.html deleted file mode 100644 index a7c3e224..00000000 --- a/zh-cn/docs/community/contact.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - contact - - - - -
文档

联系我们

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/community/contact.json b/zh-cn/docs/community/contact.json deleted file mode 100644 index 9be461c7..00000000 --- a/zh-cn/docs/community/contact.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "contact.md", - "__html": "

联系我们

\n

TBD

\n", - "link": "/zh-cn/docs/community/contact.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/community/contribution.html b/zh-cn/docs/community/contribution.html deleted file mode 100644 index 713a104b..00000000 --- a/zh-cn/docs/community/contribution.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - contribution - - - - -
文档

社区贡献

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/community/contribution.json b/zh-cn/docs/community/contribution.json deleted file mode 100644 index 788dcabb..00000000 --- a/zh-cn/docs/community/contribution.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "contribution.md", - "__html": "

社区贡献

\n

TBD

\n", - "link": "/zh-cn/docs/community/contribution.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/community/question.html b/zh-cn/docs/community/question.html deleted file mode 100644 index a5da84ef..00000000 --- a/zh-cn/docs/community/question.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - question - - - - -
文档

提交问题

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/community/question.json b/zh-cn/docs/community/question.json deleted file mode 100644 index f85bd357..00000000 --- a/zh-cn/docs/community/question.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "question.md", - "__html": "

提交问题

\n

TBD

\n", - "link": "/zh-cn/docs/community/question.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/community/roadmap.html b/zh-cn/docs/community/roadmap.html deleted file mode 100644 index 71d3a226..00000000 --- a/zh-cn/docs/community/roadmap.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - roadmap - - - - -
文档

社区规划

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/community/roadmap.json b/zh-cn/docs/community/roadmap.json deleted file mode 100644 index 1536bc5a..00000000 --- a/zh-cn/docs/community/roadmap.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "roadmap.md", - "__html": "

社区规划

\n

TBD

\n", - "link": "/zh-cn/docs/community/roadmap.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/dev/architecture/seata_at.html b/zh-cn/docs/dev/architecture/seata_at.html deleted file mode 100644 index ab96c467..00000000 --- a/zh-cn/docs/dev/architecture/seata_at.html +++ /dev/null @@ -1,270 +0,0 @@ - - - - - - - - - - seata_at - - - - -
文档

Seata AT 模式

-

前提

-
    -
  • 基于支持本地 ACID 事务的关系型数据库。
  • -
  • Java 应用,通过 JDBC 访问数据库。
  • -
-

整体机制

-

两阶段提交协议的演变:

-
    -
  • -

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    -
  • -
  • -

    二阶段:

    -
      -
    • 提交异步化,非常快速地完成。
    • -
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • -
    -
  • -
-

写隔离

-
    -
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • -
  • 拿不到 全局锁 ,不能提交本地事务。
  • -
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • -
-

以一个示例来说明:

-

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

-

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。 -tx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

-

Write-Isolation: Commit

-

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

-

Write-Isolation: Rollback

-

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

-

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

-

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

-

读隔离

-

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

-

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

-

Read Isolation: SELECT FOR UPDATE

-

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

-

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

-

工作机制

-

以一个示例来说明整个 AT 分支的工作过程。

-

业务表:product

- - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
-

AT 分支事务的业务逻辑:

-
update product set name = 'GTS' where name = 'TXC';
-
-

一阶段

-

过程:

-
    -
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. -
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

得到前镜像:

- - - - - - - - - - - - - - - -
idnamesince
1TXC2014
-
    -
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. -
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. -
-
select id, name, since from product where id = 1;
-
-

得到后镜像:

- - - - - - - - - - - - - - - -
idnamesince
1GTS2014
-
    -
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. -
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. -
  5. 将本地事务提交的结果上报给 TC。
  6. -
-

二阶段-回滚

-
    -
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. -
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. -
  5. 数据校验:拿 UNDO LOG 中的后镜像与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. -
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. -
-

二阶段-提交

-
    -
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. -
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. -
-

附录

-

回滚日志表

-

UNDO_LOG Table:不同数据库在类型上会略有差别。

-

以 MySQL 为例:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
-
-- 注意此处0.7.0+ 增加字段 context
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-
- - - - - - diff --git a/zh-cn/docs/dev/architecture/seata_at.json b/zh-cn/docs/dev/architecture/seata_at.json deleted file mode 100644 index 0087bb4a..00000000 --- a/zh-cn/docs/dev/architecture/seata_at.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "seata_at.md", - "__html": "

Seata AT 模式

\n

前提

\n
    \n
  • 基于支持本地 ACID 事务的关系型数据库。
  • \n
  • Java 应用,通过 JDBC 访问数据库。
  • \n
\n

整体机制

\n

两阶段提交协议的演变:

\n
    \n
  • \n

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    \n
  • \n
  • \n

    二阶段:

    \n
      \n
    • 提交异步化,非常快速地完成。
    • \n
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • \n
    \n
  • \n
\n

写隔离

\n
    \n
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • \n
  • 拿不到 全局锁 ,不能提交本地事务。
  • \n
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • \n
\n

以一个示例来说明:

\n

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

\n

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。\ntx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

\n

\"Write-Isolation:

\n

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

\n

\"Write-Isolation:

\n

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

\n

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

\n

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

\n

读隔离

\n

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

\n

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

\n

\"Read

\n

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

\n

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

\n

工作机制

\n

以一个示例来说明整个 AT 分支的工作过程。

\n

业务表:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

AT 分支事务的业务逻辑:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

一阶段

\n

过程:

\n
    \n
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. \n
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

得到前镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. \n
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. \n
\n
select id, name, since from product where id = 1`;\n
\n

得到后镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. \n
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. \n
  5. 将本地事务提交的结果上报给 TC。
  6. \n
\n

二阶段-回滚

\n
    \n
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. \n
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. \n
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. \n
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
    \n
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. \n
\n

二阶段-提交

\n
    \n
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. \n
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. \n
\n

附录

\n

回滚日志表

\n

UNDO_LOG Table:不同数据库在类型上会略有差别。

\n

以 MySQL 为例:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
\n
-- 注意此处0.7.0+ 增加字段 context\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n", - "link": "/zh-cn/docs/dev/architecture/seata_at.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/dev/architecture/seata_mertics.html b/zh-cn/docs/dev/architecture/seata_mertics.html deleted file mode 100644 index 50608f9e..00000000 --- a/zh-cn/docs/dev/architecture/seata_mertics.html +++ /dev/null @@ -1,213 +0,0 @@ - - - - - - - - - - seata_mertics - - - - -
文档

Metrics

-

设计思路

-
    -
  1. Seata作为一个被集成的数据一致性框架,Metrics模块将尽可能少的使用第三方依赖以降低发生冲突的风险;
  2. -
  3. Metrics模块将竭力争取更高的度量性能和更低的资源开销,尽可能降低开启后带来的副作用;
  4. -
  5. 插件式——Metrics是否激活、数据如何发布,取决于是否引入了对应的依赖,例如在TC Server中引入seata-metrics-prometheus,则自动启用并将度量数据发布到Prometheus;
  6. -
  7. 不使用Spring,使用SPI(Service Provider Interface)加载扩展;
  8. -
  9. 初始仅发布核心Transaction相关指标,之后结合社区的需求,逐步完善运维方面的所有其他指标。
  10. -
-

模块说明

-

由1个核心API模块seata-metrics-api和N个对接实现模块如seata-metrics-prometheus构成:

-
    -
  • seata-metrics-api模块
  • -
-

此模块是Metrics的核心,将作为Seata基础架构的一部分被TC、TM和RM引用,它内部没有任何具体实现代码,仅包含接口定义,定义的内容包括:

-
    -
  1. Meter类接口:GaugeCounterTimer...
  2. -
  3. 注册容器接口Registry
  4. -
  5. Measurement发布接口Publisher
  6. -
-
-

提示:Metrics本身在开源领域也已有很多实现,例如

-
    -
  1. Netflix-Spectator
  2. -
  3. Dropwizard-Metrics
  4. -
  5. Dubbo-Metrics
  6. -
-
-
-

它们有的轻而敏捷,有的重而强大,由于也是“实现”,因此不会纳入seata-metrics-api中,避免实现绑定。

-
-
    -
  • seata-metrics-prometheus模块
  • -
-

这是我们默认提供的Metrics实现,不使用其它Metrics开源实现,并轻量级的实现了以下三个Meter:

- - - - - - - - - - - - - - - - - - - - - - - - - -
Meter类型描述
Gauge单一最新值度量器
Counter单一累加度量器,可增可减
Summary多Measurement输出计数器,将输出total(合计)、count(计数)、max(最大)、average(合计/计数)和tps(合计/时间间隔),无单位
Timer多Measurement输出计时器,将输出total(合计)、count(计数)、max(最大)和average(合计/计数),支持微秒为单位累计
-
-

说明:

-
    -
  1. 未来可能增加更丰富复杂的度量器例如Histogram,这是一种可以本地统计聚合75th, 90th, 95th, 98th, 99th,99.9th...的度量器,适合某些场合,但需要更多内存。
  2. -
  3. 所有的计量器都将继承自Meter,所有的计量器执行measure()方法后,都将归一化的生成1或N个Measurement结果。
  4. -
-
-

它也会实现一个内存的Registry和PrometheusExporter,将度量数据同步给Prometheus。

-
-

说明:不同的监控系统,采集度量数据的方式不尽相同,例如Zabbix支持用zabbix-agent推送,Prometheus则推荐使用prometheus-server拉取的方式;同样数据交换协议也不同,因此往往需要逐一适配。

-
-

如何使用

-
引入依赖
-

如果需要开启TC的Metrics,只需要在seata-server的pom中增加:

-
<dependencies>
-	<dependency>
-		<groupId>${project.groupId}</groupId>
-		<artifactId>seata-core</artifactId>
-	</dependency>
-	<!--导入依赖,启用Metrics-->
-	<dependency>
-		<groupId>${project.groupId}</groupId>
-		<artifactId>seata-metrics-prometheus</artifactId>
-	</dependency>
-	<dependency>
-		<groupId>commons-lang</groupId>
-		<artifactId>commons-lang</artifactId>
-	</dependency>
-	<dependency>
-		<groupId>org.testng</groupId>
-		<artifactId>testng</artifactId>
-		<scope>test</scope>
-	</dependency>
-</dependencies>
-
-

之后启动TC,即可在http://tc-server-ip:9898/metrics上获取到Metrics的文本格式数据。

-
-

提示:默认使用9898端口,Prometheus已登记的端口列表在此,如果想更换端口,可通过metrics.exporter.prometheus.port配置修改。

-
-
下载并启动Prometheus
-

下载完毕后,修改Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata的度量数据:

-
scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['localhost:9090']
-
-  - job_name: 'seata'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['tc-server-ip:9898']
-
-
查看数据输出
-

推荐结合配置Grafana获得更好的查询效果,初期Seata导出的Metrics包括:

-
    -
  • TC :
  • -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Metrics描述
seata.transaction(role=tc,meter=counter,status=active/committed/rollback)当前活动中/已提交/已回滚的事务总数
seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=summary,statistic=tps,status=committed/rollback)当前周期内提交/回滚的事务TPS(transaction per second)
seata.transaction(role=tc,meter=timer,statistic=total,status=committed/rollback)当前周期内提交/回滚的事务耗时总和
seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=timer,statistic=average,status=committed/rollback)当前周期内提交/回滚的事务平均耗时
seata.transaction(role=tc,meter=timer,statistic=max,status=committed/rollback)当前周期内提交/回滚的事务最大耗时
-
-

提示:seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)和seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)的值可能相同,但它们来源于两个不同的度量器。

-
-
    -
  • TM:
  • -
-

稍后实现,包括诸如: -seata.transaction(role=tm,name={GlobalTransactionalName},meter=counter,status=active/committed/rollback) : 以GlobalTransactionalName为维度区分不同Transactional的状态。

-
    -
  • RM:
  • -
-

稍后实现,包括诸如: -seata.transaction(role=rm,name={BranchTransactionalName},mode=at/mt,meter=counter,status=active/committed/rollback):以BranchTransactionalName为维度以及AT/MT维度区分不同分支Transactional的状态。

-

如何扩展

-

如果有下面几种情况:

-
    -
  1. 您不是使用Prometheus作为运维监控系统,但希望能够将Seata的Metrics数据集成进Dashboard中;
  2. -
  3. 您需要更复杂强大的度量器类型,这些度量器在其他Metrics实现库中已有,希望集成这些第三方依赖直接使用;
  4. -
  5. 您需要改变默认Metric的Measurement输出,例如在Timer中增加一个minsd(方差);
  6. -
  7. ...
  8. -
-

那么需要自行扩展Metrics的实现,请创建新的模块项目例如seata-metrics-xxxx,之后:

-
    -
  • 针对1:您需要实现新的Exporter;
  • -
  • 针对2:您可以改变默认Registry的实现,返回第三方的Meter计量器实现;
  • -
  • 针对3:您可以修改对应Meter的实现,包括measure()方法返回的Measurement列表。
  • -
-
- - - - - - diff --git a/zh-cn/docs/dev/architecture/seata_mertics.json b/zh-cn/docs/dev/architecture/seata_mertics.json deleted file mode 100644 index df60e3fc..00000000 --- a/zh-cn/docs/dev/architecture/seata_mertics.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "seata_mertics.md", - "__html": "

Metrics

\n

设计思路

\n
    \n
  1. Seata作为一个被集成的数据一致性框架,Metrics模块将尽可能少的使用第三方依赖以降低发生冲突的风险;
  2. \n
  3. Metrics模块将竭力争取更高的度量性能和更低的资源开销,尽可能降低开启后带来的副作用;
  4. \n
  5. 插件式——Metrics是否激活、数据如何发布,取决于是否引入了对应的依赖,例如在TC Server中引入seata-metrics-prometheus,则自动启用并将度量数据发布到Prometheus;
  6. \n
  7. 不使用Spring,使用SPI(Service Provider Interface)加载扩展;
  8. \n
  9. 初始仅发布核心Transaction相关指标,之后结合社区的需求,逐步完善运维方面的所有其他指标。
  10. \n
\n

模块说明

\n

由1个核心API模块seata-metrics-api和N个对接实现模块如seata-metrics-prometheus构成:

\n
    \n
  • seata-metrics-api模块
  • \n
\n

此模块是Metrics的核心,将作为Seata基础架构的一部分被TC、TM和RM引用,它内部没有任何具体实现代码,仅包含接口定义,定义的内容包括:

\n
    \n
  1. Meter类接口:GaugeCounterTimer...
  2. \n
  3. 注册容器接口Registry
  4. \n
  5. Measurement发布接口Publisher
  6. \n
\n
\n

提示:Metrics本身在开源领域也已有很多实现,例如

\n
    \n
  1. Netflix-Spectator
  2. \n
  3. Dropwizard-Metrics
  4. \n
  5. Dubbo-Metrics
  6. \n
\n
\n
\n

它们有的轻而敏捷,有的重而强大,由于也是“实现”,因此不会纳入seata-metrics-api中,避免实现绑定。

\n
\n
    \n
  • seata-metrics-prometheus模块
  • \n
\n

这是我们默认提供的Metrics实现,不使用其它Metrics开源实现,并轻量级的实现了以下三个Meter:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Meter类型描述
Gauge单一最新值度量器
Counter单一累加度量器,可增可减
Summary多Measurement输出计数器,将输出total(合计)、count(计数)、max(最大)、average(合计/计数)和tps(合计/时间间隔),无单位
Timer多Measurement输出计时器,将输出total(合计)、count(计数)、max(最大)和average(合计/计数),支持微秒为单位累计
\n
\n

说明:

\n
    \n
  1. 未来可能增加更丰富复杂的度量器例如Histogram,这是一种可以本地统计聚合75th, 90th, 95th, 98th, 99th,99.9th...的度量器,适合某些场合,但需要更多内存。
  2. \n
  3. 所有的计量器都将继承自Meter,所有的计量器执行measure()方法后,都将归一化的生成1或N个Measurement结果。
  4. \n
\n
\n

它也会实现一个内存的Registry和PrometheusExporter,将度量数据同步给Prometheus。

\n
\n

说明:不同的监控系统,采集度量数据的方式不尽相同,例如Zabbix支持用zabbix-agent推送,Prometheus则推荐使用prometheus-server拉取的方式;同样数据交换协议也不同,因此往往需要逐一适配。

\n
\n

如何使用

\n
引入依赖
\n

如果需要开启TC的Metrics,只需要在seata-server的pom中增加:

\n
<dependencies>\n\t<dependency>\n\t\t<groupId>${project.groupId}</groupId>\n\t\t<artifactId>seata-core</artifactId>\n\t</dependency>\n\t<!--导入依赖,启用Metrics-->\n\t<dependency>\n\t\t<groupId>${project.groupId}</groupId>\n\t\t<artifactId>seata-metrics-prometheus</artifactId>\n\t</dependency>\n\t<dependency>\n\t\t<groupId>commons-lang</groupId>\n\t\t<artifactId>commons-lang</artifactId>\n\t</dependency>\n\t<dependency>\n\t\t<groupId>org.testng</groupId>\n\t\t<artifactId>testng</artifactId>\n\t\t<scope>test</scope>\n\t</dependency>\n</dependencies>\n
\n

之后启动TC,即可在http://tc-server-ip:9898/metrics上获取到Metrics的文本格式数据。

\n
\n

提示:默认使用9898端口,Prometheus已登记的端口列表在此,如果想更换端口,可通过metrics.exporter.prometheus.port配置修改。

\n
\n
下载并启动Prometheus
\n

下载完毕后,修改Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata的度量数据:

\n
scrape_configs:\n  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.\n  - job_name: 'prometheus'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['localhost:9090']\n\n  - job_name: 'seata'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['tc-server-ip:9898']\n
\n
查看数据输出
\n

推荐结合配置Grafana获得更好的查询效果,初期Seata导出的Metrics包括:

\n
    \n
  • TC :
  • \n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Metrics描述
seata.transaction(role=tc,meter=counter,status=active/committed/rollback)当前活动中/已提交/已回滚的事务总数
seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=summary,statistic=tps,status=committed/rollback)当前周期内提交/回滚的事务TPS(transaction per second)
seata.transaction(role=tc,meter=timer,statistic=total,status=committed/rollback)当前周期内提交/回滚的事务耗时总和
seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=timer,statistic=average,status=committed/rollback)当前周期内提交/回滚的事务平均耗时
seata.transaction(role=tc,meter=timer,statistic=max,status=committed/rollback)当前周期内提交/回滚的事务最大耗时
\n
\n

提示:seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)和seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)的值可能相同,但它们来源于两个不同的度量器。

\n
\n
    \n
  • TM:
  • \n
\n

稍后实现,包括诸如:\nseata.transaction(role=tm,name={GlobalTransactionalName},meter=counter,status=active/committed/rollback) : 以GlobalTransactionalName为维度区分不同Transactional的状态。

\n
    \n
  • RM:
  • \n
\n

稍后实现,包括诸如:\nseata.transaction(role=rm,name={BranchTransactionalName},mode=at/mt,meter=counter,status=active/committed/rollback):以BranchTransactionalName为维度以及AT/MT维度区分不同分支Transactional的状态。

\n

如何扩展

\n

如果有下面几种情况:

\n
    \n
  1. 您不是使用Prometheus作为运维监控系统,但希望能够将Seata的Metrics数据集成进Dashboard中;
  2. \n
  3. 您需要更复杂强大的度量器类型,这些度量器在其他Metrics实现库中已有,希望集成这些第三方依赖直接使用;
  4. \n
  5. 您需要改变默认Metric的Measurement输出,例如在Timer中增加一个minsd(方差);
  6. \n
  7. ...
  8. \n
\n

那么需要自行扩展Metrics的实现,请创建新的模块项目例如seata-metrics-xxxx,之后:

\n
    \n
  • 针对1:您需要实现新的Exporter;
  • \n
  • 针对2:您可以改变默认Registry的实现,返回第三方的Meter计量器实现;
  • \n
  • 针对3:您可以修改对应Meter的实现,包括measure()方法返回的Measurement列表。
  • \n
\n", - "link": "/zh-cn/docs/dev/architecture/seata_mertics.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/dev/architecture/seata_saga.html b/zh-cn/docs/dev/architecture/seata_saga.html deleted file mode 100644 index ba79547b..00000000 --- a/zh-cn/docs/dev/architecture/seata_saga.html +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - seata_saga - - - - -
文档

SEATA Saga 模式

-

概述

-

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

-

Saga模式示意图

-

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

-

Saga的实现:

-

基于状态机引擎的 Saga 实现:

-

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

-
    -
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. -
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. -
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. -
-
-

注意: 异常发生时是否进行补偿也可由用户自定义决定

-
-
    -
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. -
-

示例状态图:

-

示例状态图

-

设计

-

状态机引擎原理:

-

状态机引擎原理

-
    -
  • 图中的状态图是先执行stateA, 再执行stateB,然后执行stateC
  • -
  • "状态"的执行是基于事件驱动的模型,stateA执行完成后,会产生路由消息放入EventQueue,事件消费端从EventQueue取出消息,执行stateB
  • -
  • 在整个状态机启动时会调用Seata Server开启分布式事务,并生成xid, 然后记录"状态机实例"启动事件到本地数据库
  • -
  • 当执行到一个"状态"时会调用Seata Server注册分支事务,并生成branchId, 然后记录"状态实例"开始执行事件到本地数据库
  • -
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用Seata Server上报分支事务的状态
  • -
  • 当整个状态机执行完成, 会记录"状态机实例"执行完成事件到本地数据库, 然后调用Seata Server提交或回滚分布式事务
  • -
-

状态机引擎设计:

-

状态机引擎设计

-

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

-
    -
  • Eventing 层: -
      -
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么消费端执行什么,由上层实现
    • -
    -
  • -
  • ProcessController 层: -
      -
    • 由于上层的Eventing驱动一个“空”流程引擎的执行,"state"的行为和路由都未实现, 由上层实现
    • -
    -
  • -
-
-

基于以上两层理论上可以自定义扩展任何"流程"引擎

-
-
    -
  • StateMachineEngine 层: -
      -
    • 实现状态机引擎每种state的行为和路由逻辑
    • -
    • 提供 API、状态机语言仓库
    • -
    -
  • -
-
- - - - - - diff --git a/zh-cn/docs/dev/architecture/seata_saga.json b/zh-cn/docs/dev/architecture/seata_saga.json deleted file mode 100644 index 1db1de8d..00000000 --- a/zh-cn/docs/dev/architecture/seata_saga.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "seata_saga.md", - "__html": "

SEATA Saga 模式

\n

概述

\n

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

\n

\"Saga模式示意图\"

\n

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

\n

Saga的实现:

\n

基于状态机引擎的 Saga 实现:

\n

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

\n
    \n
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. \n
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. \n
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. \n
\n
\n

注意: 异常发生时是否进行补偿也可由用户自定义决定

\n
\n
    \n
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. \n
\n

示例状态图:

\n

\"示例状态图\"

\n

设计

\n

状态机引擎原理:

\n

\"状态机引擎原理\"

\n
    \n
  • 图中的状态图是先执行stateA, 再执行stataB,然后执行stateC
  • \n
  • "状态"的执行是基于事件驱动的模型,stataA执行完成后,会产生路由消息放入EventQueue,事件消费端从EventQueue取出消息,执行stateB
  • \n
  • 在整个状态机启动时会调用Seata Server开启分布式事务,并生产xid, 然后记录"状态机实例"启动事件到本地数据库
  • \n
  • 当执行到一个"状态"时会调用Seata Server注册分支事务,并生产branchId, 然后记录"状态实例"开始执行事件到本地数据库
  • \n
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用Seata Server上报分支事务的状态
  • \n
  • 当整个状态机执行完成, 会记录"状态机实例"执行完成事件到本地数据库, 然后调用Seata Server提交或回滚分布式事务
  • \n
\n

状态机引擎设计:

\n

\"状态机引擎设计\"

\n

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

\n
    \n
  • Eventing 层:\n
      \n
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么消费端执行什么,由上层实现
    • \n
    \n
  • \n
  • ProcessController 层:\n
      \n
    • 由于上层的Eventing驱动一个“空”流程引擎的执行,"state"的行为和路由都未实现, 由上层实现
    • \n
    \n
  • \n
\n
\n

基于以上两层理论上可以自定义扩展任何"流程"引擎

\n
\n
    \n
  • StateMachineEngine 层:\n
      \n
    • 实现状态机引擎每种state的行为和路由逻辑
    • \n
    • 提供 API、状态机语言仓库
    • \n
    \n
  • \n
\n", - "link": "/zh-cn/docs/dev/architecture/seata_saga.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/dev/architecture/seata_tcc.html b/zh-cn/docs/dev/architecture/seata_tcc.html deleted file mode 100644 index 4c3935e0..00000000 --- a/zh-cn/docs/dev/architecture/seata_tcc.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - seata_tcc - - - - -
文档

Seata TCC 模式

-

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

-
    -
  • 一阶段 prepare 行为
  • -
  • 二阶段 commit 或 rollback 行为
  • -
-

Overview of a global transaction

-

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeTCC (Branch) Transaction Mode.

-

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

-
    -
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • -
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • -
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • -
-

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

-
    -
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • -
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • -
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • -
-

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。
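
在 Seata 中,一种常见写法是在一阶段 prepare 方法上标注两阶段注解,由框架在二阶段回调 commit / rollback,从而把自定义分支纳入全局事务。下面是一个参考 TCC Sample 风格的接口示意,注解与包名以实际使用的版本为准(此处按 io.seata 包名书写,仅作参考):

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

// 自定义 TCC 分支示意:prepare 标注两阶段注解,commit/rollback 由框架在二阶段回调
// 注:包名与注解属性以实际使用的 Seata 版本为准
public interface TccActionOne {

    /** 一阶段 prepare:检查并预留业务资源 */
    @TwoPhaseBusinessAction(name = "TccActionOne", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext actionContext,
                    @BusinessActionContextParameter(paramName = "a") int a);

    /** 二阶段 commit:执行真正的业务提交 */
    boolean commit(BusinessActionContext actionContext);

    /** 二阶段 rollback:释放 prepare 预留的资源 */
    boolean rollback(BusinessActionContext actionContext);
}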

-
- - - - - - diff --git a/zh-cn/docs/dev/architecture/seata_tcc.json b/zh-cn/docs/dev/architecture/seata_tcc.json deleted file mode 100644 index 068191f1..00000000 --- a/zh-cn/docs/dev/architecture/seata_tcc.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "seata_tcc.md", - "__html": "

Seata TCC 模式

\n

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

\n
    \n
  • 一阶段 prepare 行为
  • \n
  • 二阶段 commit 或 rollback 行为
  • \n
\n

\"Overview

\n

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeTCC (Branch) Transaction Mode.

\n

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

\n
    \n
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • \n
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • \n
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • \n
\n

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

\n
    \n
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • \n
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • \n
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • \n
\n

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

\n", - "link": "/zh-cn/docs/dev/architecture/seata_tcc.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/dev/mode/at-mode.html b/zh-cn/docs/dev/mode/at-mode.html deleted file mode 100644 index cce938e9..00000000 --- a/zh-cn/docs/dev/mode/at-mode.html +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - - - Seata AT 模式 - - - - -
文档

Seata AT 模式

-

前提

-
    -
  • 基于支持本地 ACID 事务的关系型数据库。
  • -
  • Java 应用,通过 JDBC 访问数据库。
  • -
-

整体机制

-

两阶段提交协议的演变:

-
    -
  • -

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    -
  • -
  • -

    二阶段:

    -
      -
    • 提交异步化,非常快速地完成。
    • -
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • -
    -
  • -
-

写隔离

-
    -
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • -
  • 拿不到 全局锁 ,不能提交本地事务。
  • -
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • -
-

以一个示例来说明:

-

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

-

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。 -tx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

-

Write-Isolation: Commit

-

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

-

Write-Isolation: Rollback

-

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

-

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

-

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

-

读隔离

-

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

-

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

-

Read Isolation: SELECT FOR UPDATE

-

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

-

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

-

工作机制

-

以一个示例来说明整个 AT 分支的工作过程。

-

业务表:product

- - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
-

AT 分支事务的业务逻辑:

-
update product set name = 'GTS' where name = 'TXC';
-
-

一阶段

-

过程:

-
    -
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. -
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

得到前镜像:

- - - - - - - - - - - - - - - -
idnamesince
1TXC2014
-
    -
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. -
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. -
-
select id, name, since from product where id = 1;
-
-

得到后镜像:

- - - - - - - - - - - - - - - -
idnamesince
1GTS2014
-
    -
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. -
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. -
  5. 将本地事务提交的结果上报给 TC。
  6. -
-

二阶段-回滚

-
    -
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. -
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. -
  5. 数据校验:拿 UNDO LOG 中的后镜像与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. -
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. -
-

二阶段-提交

-
    -
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. -
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. -
-

附录

-

回滚日志表

-

UNDO_LOG Table:不同数据库在类型上会略有差别。

-

以 MySQL 为例:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
-
-- 注意此处0.7.0+ 增加字段 context
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-
- - - - - - - diff --git a/zh-cn/docs/dev/mode/at-mode.json b/zh-cn/docs/dev/mode/at-mode.json deleted file mode 100644 index 59095d70..00000000 --- a/zh-cn/docs/dev/mode/at-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "at-mode.md", - "__html": "

Seata AT 模式

\n

前提

\n
    \n
  • 基于支持本地 ACID 事务的关系型数据库。
  • \n
  • Java 应用,通过 JDBC 访问数据库。
  • \n
\n

整体机制

\n

两阶段提交协议的演变:

\n
    \n
  • \n

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    \n
  • \n
  • \n

    二阶段:

    \n
      \n
    • 提交异步化,非常快速地完成。
    • \n
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • \n
    \n
  • \n
\n

写隔离

\n
    \n
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • \n
  • 拿不到 全局锁 ,不能提交本地事务。
  • \n
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • \n
\n

以一个示例来说明:

\n

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

\n

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。\ntx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

\n

\"Write-Isolation:

\n

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

\n

\"Write-Isolation:

\n

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

\n

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

\n

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

\n

读隔离

\n

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

\n

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

\n

\"Read

\n

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

\n

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

\n

工作机制

\n

以一个示例来说明整个 AT 分支的工作过程。

\n

业务表:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

AT 分支事务的业务逻辑:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

一阶段

\n

过程:

\n
    \n
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. \n
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

得到前镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. \n
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. \n
\n
select id, name, since from product where id = 1`;\n
\n

得到后镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. \n
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. \n
  5. 将本地事务提交的结果上报给 TC。
  6. \n
\n

二阶段-回滚

\n
    \n
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. \n
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. \n
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. \n
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
    \n
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. \n
\n

二阶段-提交

\n
    \n
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. \n
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. \n
\n

附录

\n

回滚日志表

\n

UNDO_LOG Table:不同数据库在类型上会略有差别。

\n

以 MySQL 为例:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
\n
-- 注意此处0.7.0+ 增加字段 context\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n", - "link": "/zh-cn/docs/dev/mode/at-mode.html", - "meta": { - "title": "Seata AT 模式", - "keywords": "Seata", - "description": "Seata AT 模式。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/dev/mode/saga-mode.html b/zh-cn/docs/dev/mode/saga-mode.html deleted file mode 100644 index c702e67f..00000000 --- a/zh-cn/docs/dev/mode/saga-mode.html +++ /dev/null @@ -1,90 +0,0 @@ - - - - - - - - - - Seata Saga 模式 - - - - -
文档

SEATA Saga 模式

-

概述

-

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

-

Saga模式示意图

-

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

-

Saga的实现:

-

基于状态机引擎的 Saga 实现:

-

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

-
    -
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. -
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. -
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. -
-
-

注意: 异常发生时是否进行补偿也可由用户自定义决定

-
-
    -
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. -
-

示例状态图:

-

示例状态图

-

设计

-

状态机引擎原理:

-

状态机引擎原理

-
    -
  • 图中的状态图是先执行stateA, 再执行stateB,然后执行stateC
  • -
  • "状态"的执行是基于事件驱动的模型,stateA执行完成后,会产生路由消息放入EventQueue,事件消费端从EventQueue取出消息,执行stateB
  • -
  • 在整个状态机启动时会调用Seata Server开启分布式事务,并生成xid, 然后记录"状态机实例"启动事件到本地数据库
  • -
  • 当执行到一个"状态"时会调用Seata Server注册分支事务,并生成branchId, 然后记录"状态实例"开始执行事件到本地数据库
  • -
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用Seata Server上报分支事务的状态
  • -
  • 当整个状态机执行完成, 会记录"状态机实例"执行完成事件到本地数据库, 然后调用Seata Server提交或回滚分布式事务
  • -
-

状态机引擎设计:

-

状态机引擎设计

-

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

-
    -
  • Eventing 层: -
      -
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么消费端执行什么,由上层实现
    • -
    -
  • -
  • ProcessController 层: -
      -
    • 由于上层的Eventing驱动一个“空”流程引擎的执行,"state"的行为和路由都未实现, 由上层实现
    • -
    -
  • -
-
-

基于以上两层理论上可以自定义扩展任何"流程"引擎

-
-
    -
  • StateMachineEngine 层: -
      -
    • 实现状态机引擎每种state的行为和路由逻辑
    • -
    • 提供 API、状态机语言仓库
    • -
    -
  • -
-
- - - - - - - diff --git a/zh-cn/docs/dev/mode/saga-mode.json b/zh-cn/docs/dev/mode/saga-mode.json deleted file mode 100644 index dc8b69a2..00000000 --- a/zh-cn/docs/dev/mode/saga-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "saga-mode.md", - "__html": "

SEATA Saga 模式

\n

概述

\n

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

\n

\"Saga模式示意图\"

\n

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

\n

Saga的实现:

\n

基于状态机引擎的 Saga 实现:

\n

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

\n
    \n
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. \n
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. \n
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. \n
\n
\n

注意: 异常发生时是否进行补偿也可由用户自定义决定

\n
\n
    \n
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. \n
\n

示例状态图:

\n

\"示例状态图\"

\n

设计

\n

状态机引擎原理:

\n

\"状态机引擎原理\"

\n
    \n
  • 图中的状态图是先执行stateA, 再执行stataB,然后执行stateC
  • \n
  • "状态"的执行是基于事件驱动的模型,stataA执行完成后,会产生路由消息放入EventQueue,事件消费端从EventQueue取出消息,执行stateB
  • \n
  • 在整个状态机启动时会调用Seata Server开启分布式事务,并生产xid, 然后记录"状态机实例"启动事件到本地数据库
  • \n
  • 当执行到一个"状态"时会调用Seata Server注册分支事务,并生产branchId, 然后记录"状态实例"开始执行事件到本地数据库
  • \n
  • 当一个"状态"执行完成后会记录"状态实例"执行结束事件到本地数据库, 然后调用Seata Server上报分支事务的状态
  • \n
  • 当整个状态机执行完成, 会记录"状态机实例"执行完成事件到本地数据库, 然后调用Seata Server提交或回滚分布式事务
  • \n
\n

状态机引擎设计:

\n

\"状态机引擎设计\"

\n

状态机引擎的设计主要分成三层, 上层依赖下层,从下往上分别是:

\n
    \n
  • Eventing 层:\n
      \n
    • 实现事件驱动架构, 可以压入事件, 并由消费端消费事件, 本层不关心事件是什么消费端执行什么,由上层实现
    • \n
    \n
  • \n
  • ProcessController 层:\n
      \n
    • 由于上层的Eventing驱动一个“空”流程引擎的执行,"state"的行为和路由都未实现, 由上层实现
    • \n
    \n
  • \n
\n
\n

基于以上两层理论上可以自定义扩展任何"流程"引擎

\n
\n
    \n
  • StateMachineEngine 层:\n
      \n
    • 实现状态机引擎每种state的行为和路由逻辑
    • \n
    • 提供 API、状态机语言仓库
    • \n
    \n
  • \n
\n", - "link": "/zh-cn/docs/dev/mode/saga-mode.html", - "meta": { - "title": "Seata Saga 模式", - "keywords": "Seata", - "description": "Seata Saga 模式。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/dev/mode/tcc-mode.html b/zh-cn/docs/dev/mode/tcc-mode.html deleted file mode 100644 index 89adc982..00000000 --- a/zh-cn/docs/dev/mode/tcc-mode.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - Seata Tcc 模式 - - - - -
文档

Seata TCC 模式

-

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

-
    -
  • 一阶段 prepare 行为
  • -
  • 二阶段 commit 或 rollback 行为
  • -
-

Overview of a global transaction

-

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeTCC (Branch) Transaction Mode.

-

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

-
    -
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • -
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • -
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • -
-

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

-
    -
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • -
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • -
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • -
-

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

-
- - - - - - - diff --git a/zh-cn/docs/dev/mode/tcc-mode.json b/zh-cn/docs/dev/mode/tcc-mode.json deleted file mode 100644 index 0f4feb12..00000000 --- a/zh-cn/docs/dev/mode/tcc-mode.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "tcc-mode.md", - "__html": "

Seata TCC 模式

\n

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

\n
    \n
  • 一阶段 prepare 行为
  • \n
  • 二阶段 commit 或 rollback 行为
  • \n
\n

\"Overview

\n

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeTCC (Branch) Transaction Mode.

\n

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

\n
    \n
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • \n
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • \n
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • \n
\n

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

\n
    \n
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • \n
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • \n
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • \n
\n

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

\n", - "link": "/zh-cn/docs/dev/mode/tcc-mode.html", - "meta": { - "title": "Seata Tcc 模式", - "keywords": "Seata", - "description": "Seata Tcc 模式。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/dev/seata-mertics.html b/zh-cn/docs/dev/seata-mertics.html deleted file mode 100644 index 2695f622..00000000 --- a/zh-cn/docs/dev/seata-mertics.html +++ /dev/null @@ -1,222 +0,0 @@ - - - - - - - - - - Metrics - - - - -
文档

Metrics

-

设计思路

-
    -
  1. Seata作为一个被集成的数据一致性框架,Metrics模块将尽可能少的使用第三方依赖以降低发生冲突的风险;
  2. -
  3. Metrics模块将竭力争取更高的度量性能和更低的资源开销,尽可能降低开启后带来的副作用;
  4. -
  5. 插件式——Metrics是否激活、数据如何发布,取决于是否引入了对应的依赖,例如在TC Server中引入seata-metrics-prometheus,则自动启用并将度量数据发布到Prometheus;
  6. -
  7. 不使用Spring,使用SPI(Service Provider Interface)加载扩展;
  8. -
  9. 初始仅发布核心Transaction相关指标,之后结合社区的需求,逐步完善运维方面的所有其他指标。
  10. -
-

模块说明

-

由1个核心API模块seata-metrics-api和N个对接实现模块如seata-metrics-prometheus构成:

-
    -
  • seata-metrics-api模块
  • -
-

此模块是Metrics的核心,将作为Seata基础架构的一部分被TC、TM和RM引用,它内部没有任何具体实现代码,仅包含接口定义,定义的内容包括:

-
    -
  1. Meter类接口:GaugeCounterTimer...
  2. -
  3. 注册容器接口Registry
  4. -
  5. Measurement发布接口Publisher
  6. -
-
-

提示:Metrics本身在开源领域也已有很多实现,例如

-
    -
  1. Netflix-Spectator
  2. -
  3. Dropwizard-Metrics
  4. -
  5. Dubbo-Metrics
  6. -
-
-
-

它们有的轻而敏捷,有的重而强大,由于也是“实现”,因此不会纳入seata-metrics-api中,避免实现绑定。

-
-
    -
  • seata-metrics-prometheus模块
  • -
-

这是我们默认提供的Metrics实现,不使用其它Metrics开源实现,并轻量级的实现了以下三个Meter:

- - - - - - - - - - - - - - - - - - - - - - - - - -
Meter类型描述
Gauge单一最新值度量器
Counter单一累加度量器,可增可减
Summary多Measurement输出计数器,将输出total(合计)、count(计数)、max(最大)、average(合计/计数)和tps(合计/时间间隔),无单位
Timer多Measurement输出计时器,将输出total(合计)、count(计数)、max(最大)和average(合计/计数),支持微秒为单位累计
-
-

说明:

-
    -
  1. 未来可能增加更丰富复杂的度量器例如Histogram,这是一种可以本地统计聚合75th, 90th, 95th, 98th, 99th,99.9th...的度量器,适合某些场合,但需要更多内存。
  2. -
  3. 所有的计量器都将继承自Meter,所有的计量器执行measure()方法后,都将归一化的生成1或N个Measurement结果。
  4. -
-
-

它也会实现一个内存的Registry和PrometheusExporter,将度量数据同步给Prometheus。

-
-

说明:不同的监控系统,采集度量数据的方式不尽相同,例如Zabbix支持用zabbix-agent推送,Prometheus则推荐使用prometheus-server拉取的方式;同样数据交换协议也不同,因此往往需要逐一适配。

-
-

如何使用

-
引入依赖
-

如果需要开启TC的Metrics,只需要在seata-server的pom中增加:

-
<dependencies>
-	<dependency>
-		<groupId>${project.groupId}</groupId>
-		<artifactId>seata-core</artifactId>
-	</dependency>
-	<!--导入依赖,启用Metrics-->
-	<dependency>
-		<groupId>${project.groupId}</groupId>
-		<artifactId>seata-metrics-prometheus</artifactId>
-	</dependency>
-	<dependency>
-		<groupId>commons-lang</groupId>
-		<artifactId>commons-lang</artifactId>
-	</dependency>
-	<dependency>
-		<groupId>org.testng</groupId>
-		<artifactId>testng</artifactId>
-		<scope>test</scope>
-	</dependency>
-</dependencies>
-
-

之后启动TC,即可在http://tc-server-ip:9898/metrics上获取到Metrics的文本格式数据。

-
-

提示:默认使用9898端口,Prometheus已登记的端口列表在此,如果想更换端口,可通过metrics.exporter.prometheus.port配置修改。

-
-
下载并启动Prometheus
-

下载完毕后,修改Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata的度量数据:

-
scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['localhost:9090']
-
-  - job_name: 'seata'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['tc-server-ip:9898']
-
-
查看数据输出
-

推荐结合配置Grafana获得更好的查询效果,初期Seata导出的Metrics包括:

-
    -
  • TC :
  • -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Metrics描述
seata.transaction(role=tc,meter=counter,status=active/committed/rollback)当前活动中/已提交/已回滚的事务总数
seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=summary,statistic=tps,status=committed/rollback)当前周期内提交/回滚的事务TPS(transaction per second)
seata.transaction(role=tc,meter=timer,statistic=total,status=committed/rollback)当前周期内提交/回滚的事务耗时总和
seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=timer,statistic=average,status=committed/rollback)当前周期内提交/回滚的事务平均耗时
seata.transaction(role=tc,meter=timer,statistic=max,status=committed/rollback)当前周期内提交/回滚的事务最大耗时
-
-

提示:seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)和seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)的值可能相同,但它们来源于两个不同的度量器。

-
-
    -
  • TM:
  • -
-

稍后实现,包括诸如: -seata.transaction(role=tm,name={GlobalTransactionalName},meter=counter,status=active/committed/rollback) : 以GlobalTransactionalName为维度区分不同Transactional的状态。

-
    -
  • RM:
  • -
-

稍后实现,包括诸如: -seata.transaction(role=rm,name={BranchTransactionalName},mode=at/mt,meter=counter,status=active/committed/rollback):以BranchTransactionalName为维度以及AT/MT维度区分不同分支Transactional的状态。

-

如何扩展

-

如果有下面几种情况:

-
    -
  1. 您不是使用Prometheus作为运维监控系统,但希望能够将Seata的Metrics数据集成进Dashboard中;
  2. -
  3. 您需要更复杂强大的度量器类型,这些度量器在其他Metrics实现库中已有,希望集成这些第三方依赖直接使用;
  4. -
  5. 您需要改变默认Metric的Measurement输出,例如在Timer中增加min或sd(标准差);
  6. -
  7. ...
  8. -
-

那么需要自行扩展Metrics的实现,请创建新的模块项目例如seata-metrics-xxxx,之后:

-
    -
  • 针对1:您需要实现新的Exporter;
  • -
  • 针对2:您可以改变默认Registry的实现,返回第三方的Meter计量器实现;
  • -
  • 针对3:您可以修改对应Meter的实现,包括measure()方法返回的Measurement列表。
  • -
-
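例如,针对上面第 1 种情况,一个自定义导出器的骨架大致如下(类名、方法与加载方式均为示意,具体接口定义与 SPI 文件路径请以 seata-metrics-api 的源码为准):
import java.util.List;

// 示意骨架:把度量数据推送到自己的监控系统
public class MyDashboardExporter {

    // 假设由定时任务周期性调用:取出当前全部度量结果并推送
    public void export(List<Object> measurements) {
        for (Object measurement : measurements) {
            // TODO: 转换为目标监控系统(如 Zabbix、自研 Dashboard)的数据格式并发送
        }
    }
}
// 按 SPI 的约定,还需在新模块的 META-INF/services/ 目录下登记实现类的全限定名,
// 以便 TC 启动时自动发现并启用该导出器。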
- - - - - - - diff --git a/zh-cn/docs/dev/seata-mertics.json b/zh-cn/docs/dev/seata-mertics.json deleted file mode 100644 index b51ba6a7..00000000 --- a/zh-cn/docs/dev/seata-mertics.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "seata-mertics.md", - "__html": "

Metrics

\n

设计思路

\n
    \n
  1. Seata作为一个被集成的数据一致性框架,Metrics模块将尽可能少的使用第三方依赖以降低发生冲突的风险;
  2. \n
  3. Metrics模块将竭力争取更高的度量性能和更低的资源开销,尽可能降低开启后带来的副作用;
  4. \n
  5. 插件式——Metrics是否激活、数据如何发布,取决于是否引入了对应的依赖,例如在TC Server中引入seata-metrics-prometheus,则自动启用并将度量数据发布到Prometheus
  6. \n
  7. 不使用Spring,使用SPI(Service Provider Interface)加载扩展;
  8. \n
  9. 初始仅发布核心Transaction相关指标,之后结合社区的需求,逐步完善运维方面的所有其他指标。
  10. \n
\n

模块说明

\n

由1个核心API模块seata-metrics-api和N个对接实现模块如seata-metrics-prometheus构成:

\n
    \n
  • seata-metrics-api模块
  • \n
\n

此模块是Metrics的核心,将作为Seata基础架构的一部分被TC、TM和RM引用,它内部没有任何具体实现代码,仅包含接口定义,定义的内容包括:

\n
    \n
  1. Meter类接口:GaugeCounterTimer...
  2. \n
  3. 注册容器接口Registry
  4. \n
  5. Measurement发布接口Publisher
  6. \n
\n
\n

提示:Metrics本身在开源领域也已有很多实现,例如

\n
    \n
  1. Netflix-Spectator
  2. \n
  3. Dropwizard-Metrics
  4. \n
  5. Dubbo-Metrics
  6. \n
\n
\n
\n

它们有的轻而敏捷,有的重而强大,由于也是“实现”,因此不会纳入seata-metrics-api中,避免实现绑定。

\n
\n
    \n
  • seata-metrics-prometheus模块
  • \n
\n

这是我们默认提供的Metrics实现,不使用其它Metrics开源实现,并轻量级地实现了以下四个Meter:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Meter类型描述
Gauge单一最新值度量器
Counter单一累加度量器,可增可减
Summary多Measurement输出计数器,将输出total(合计)、count(计数)、max(最大)、average(合计/计数)和tps(合计/时间间隔),无单位
Timer多Measurement输出计时器,将输出total(合计)、count(计数)、max(最大)和average(合计/计数),支持微秒为单位累计
\n
\n

说明:

\n
    \n
  1. 未来可能增加更丰富复杂的度量器例如Histogram,这是一种可以本地统计聚合75th, 90th, 95th, 98th, 99th,99.9th...的度量器,适合某些场合,但需要更多内存。
  2. \n
  3. 所有的计量器都将继承自Meter,所有的计量器执行measure()方法后,都将归一化的生成1或N个Measurement结果。
  4. \n
\n
\n

它也会实现一个内存的Registry和PrometheusExporter,将度量数据同步给Prometheus。

\n
\n

说明:不同的监控系统,采集度量数据的方式不尽相同,例如Zabbix支持用zabbix-agent推送,Prometheus则推荐使用prometheus-server拉取的方式;同样数据交换协议也不同,因此往往需要逐一适配。

\n
\n

如何使用

\n
引入依赖
\n

如果需要开启TC的Metrics,只需要在seata-server的pom中增加:

\n
<dependencies>\n\t<dependency>\n\t\t<groupId>${project.groupId}</groupId>\n\t\t<artifactId>seata-core</artifactId>\n\t</dependency>\n\t<!--导入依赖,启用Metrics-->\n\t<dependency>\n\t\t<groupId>${project.groupId}</groupId>\n\t\t<artifactId>seata-metrics-prometheus</artifactId>\n\t</dependency>\n\t<dependency>\n\t\t<groupId>commons-lang</groupId>\n\t\t<artifactId>commons-lang</artifactId>\n\t</dependency>\n\t<dependency>\n\t\t<groupId>org.testng</groupId>\n\t\t<artifactId>testng</artifactId>\n\t\t<scope>test</scope>\n\t</dependency>\n</dependencies>\n
\n

之后启动TC,即可在http://tc-server-ip:9898/metrics上获取到Metrics的文本格式数据。

\n
\n

提示:默认使用9898端口,Prometheus已登记的端口列表在此,如果想更换端口,可通过metrics.exporter.prometheus.port配置修改。

\n
\n
下载并启动Prometheus
\n

下载完毕后,修改Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata的度量数据:

\n
scrape_configs:\n  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.\n  - job_name: 'prometheus'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['localhost:9090']\n\n  - job_name: 'seata'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['tc-server-ip:9898']\n
\n
查看数据输出
\n

推荐结合配置Grafana获得更好的查询效果,初期Seata导出的Metrics包括:

\n
    \n
  • TC :
  • \n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Metrics描述
seata.transaction(role=tc,meter=counter,status=active/committed/rollback)当前活动中/已提交/已回滚的事务总数
seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=summary,statistic=tps,status=committed/rollback)当前周期内提交/回滚的事务TPS(transaction per second)
seata.transaction(role=tc,meter=timer,statistic=total,status=committed/rollback)当前周期内提交/回滚的事务耗时总和
seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)当前周期内提交/回滚的事务数
seata.transaction(role=tc,meter=timer,statistic=average,status=committed/rollback)当前周期内提交/回滚的事务平均耗时
seata.transaction(role=tc,meter=timer,statistic=max,status=committed/rollback)当前周期内提交/回滚的事务最大耗时
\n
\n

提示:seata.transaction(role=tc,meter=summary,statistic=count,status=committed/rollback)和seata.transaction(role=tc,meter=timer,statistic=count,status=committed/rollback)的值可能相同,但它们来源于两个不同的度量器。

\n
\n
    \n
  • TM:
  • \n
\n

稍后实现,包括诸如:\nseata.transaction(role=tm,name={GlobalTransactionalName},meter=counter,status=active/committed/rollback) : 以GlobalTransactionalName为维度区分不同Transactional的状态。

\n
    \n
  • RM:
  • \n
\n

稍后实现,包括诸如:\nseata.transaction(role=rm,name={BranchTransactionalName},mode=at/mt,meter=counter,status=active/committed/rollback):以BranchTransactionalName为维度以及AT/MT维度区分不同分支Transactional的状态。

\n

如何扩展

\n

如果有下面几种情况:

\n
    \n
  1. 您不是使用Prometheus作为运维监控系统,但希望能够将Seata的Metrics数据集成进Dashboard中;
  2. \n
  3. 您需要更复杂强大的度量器类型,这些度量器在其他Metrics实现库中已有,希望集成这些第三方依赖直接使用;
  4. \n
  5. 您需要改变默认Metric的Measurement输出,例如在Timer中增加min或sd(标准差);
  6. \n
  7. ...
  8. \n
\n

那么需要自行扩展Metrics的实现,请创建新的模块项目例如seata-metrics-xxxx,之后:

\n
    \n
  • 针对1:您需要实现新的Exporter;
  • \n
  • 针对2:您可以改变默认Registry的实现,返回第三方的Meter计量器实现;
  • \n
  • 针对3:您可以修改对应Meter的实现,包括measure()方法返回的Measurement列表。
  • \n
\n", - "link": "/zh-cn/docs/dev/seata-mertics.html", - "meta": { - "title": "Metrics", - "keywords": "Seata", - "description": "Metrics。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html b/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html deleted file mode 100644 index 77fb7c48..00000000 --- a/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - 给问题打标签 - - - - -
文档

给问题打标签

-

如果您正在处理一个问题,请记得给这个问题标记一个或者多个您认为有意义的标签。有了标签,其他开发人员就会很轻松地识别出问题,以便对其进行分类并跟踪进度。

-

对于需要编码和发版修复的issues和pull requests,需要您将其标记为milestone

-

一些常用的标签:

-
    -
  • 请求帮助 -
      -
    • help wanted
    • -
    • good first issue
    • -
    -
  • -
  • 优先级 -
      -
    • priority/blocker
    • -
    • priority/high
    • -
    • priority/low
    • -
    • priority/normal
    • -
    -
  • -
  • 状态 -
      -
    • status/need-triage
    • -
    • status/DO-NOT-MERGE
    • -
    • status/READY-TO-MERGE
    • -
    • status/invalid
    • -
    • status/wontfix
    • -
    -
  • -
  • 类型 -
      -
    • type/bug
    • -
    • type/documentation
    • -
    • type/enhancement
    • -
    • type/feature
    • -
    -
  • -
-
- - - - - - - diff --git a/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.json b/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.json deleted file mode 100644 index e7faf0bb..00000000 --- a/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "label-an-issue-guide_dev.md", - "__html": "

给问题打标签

\n

如果您正在处理一个问题,请记得给这个问题标记一个或者多个您认为有意义的标签。有了标签,其他开发人员就会很轻松地识别出问题,以便对其进行分类并跟踪进度。

\n

对于需要编码和发版修复的issues和pull requests,需要您将其标记为milestone

\n

一些常用的标签:

\n
    \n
  • 请求帮助\n
      \n
    • help wanted
    • \n
    • good first issue
    • \n
    \n
  • \n
  • 优先级\n
      \n
    • priority/blocker
    • \n
    • priority/high
    • \n
    • priority/low
    • \n
    • priority/normal
    • \n
    \n
  • \n
  • 状态\n
      \n
    • status/need-triage
    • \n
    • status/DO-NOT-MERGE
    • \n
    • status/READY-TO-MERGE
    • \n
    • status/invalid
    • \n
    • status/wontfix
    • \n
    \n
  • \n
  • 类型\n
      \n
    • type/bug
    • \n
    • type/documentation
    • \n
    • type/enhancement
    • \n
    • type/feature
    • \n
    \n
  • \n
\n", - "link": "/zh-cn/docs/developers/committer-guide/label-an-issue-guide_dev.html", - "meta": { - "title": "给问题打标签", - "keywords": "Seata", - "description": "如果您正在处理一个问题,请记得给这个问题标记一个或者多个您认为有意义的标签。有了标签,其他开发人员就会很轻松地识别出问题,以便对其进行分类并跟踪进度。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/committer-guide/release-guide_dev.html b/zh-cn/docs/developers/committer-guide/release-guide_dev.html deleted file mode 100644 index 7ce7a084..00000000 --- a/zh-cn/docs/developers/committer-guide/release-guide_dev.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - Seata版本向导 - - - - -
文档

TBD

-
- - - - - - - diff --git a/zh-cn/docs/developers/committer-guide/release-guide_dev.json b/zh-cn/docs/developers/committer-guide/release-guide_dev.json deleted file mode 100644 index 2262d31d..00000000 --- a/zh-cn/docs/developers/committer-guide/release-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "release-guide_dev.md", - "__html": "

TBD

\n", - "link": "/zh-cn/docs/developers/committer-guide/release-guide_dev.html", - "meta": { - "title": "Seata版本向导", - "keywords": "Seata", - "description": "Seata版本向导。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/committer-guide/website-guide_dev.html b/zh-cn/docs/developers/committer-guide/website-guide_dev.html deleted file mode 100644 index 6a9644ce..00000000 --- a/zh-cn/docs/developers/committer-guide/website-guide_dev.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - 网站向导 - - - - -
文档

网站向导

-
    -
  1. Seata 的网站仓库是 https://github.com/seata/seata.github.io
  2. -
  3. 网站构建完毕后,它会被自动发布到 seata.io
  4. -
-
- - - - - - - diff --git a/zh-cn/docs/developers/committer-guide/website-guide_dev.json b/zh-cn/docs/developers/committer-guide/website-guide_dev.json deleted file mode 100644 index c3102cb2..00000000 --- a/zh-cn/docs/developers/committer-guide/website-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "website-guide_dev.md", - "__html": "

网站向导

\n
    \n
  1. Seata 的网站仓库是 https://github.com/seata/seata.github.io
  2. \n
  3. 网站构建完毕后,它会被自动发布到 seata.io
  4. \n
\n", - "link": "/zh-cn/docs/developers/committer-guide/website-guide_dev.html", - "meta": { - "title": "网站向导", - "keywords": "Seata", - "description": "Seata 网站向导。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html b/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html deleted file mode 100644 index 739ef0cd..00000000 --- a/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - 新贡献者向导 - - - - -
文档

新贡献者向导

-

这篇向导旨在给正在准备向Seata提交贡献的新手提供指导。

-

邮件列表描述

-

TBD

-

报告问题

-

您始终可以通过Github Issues 向Seata报告问题。

-

如果您正在报告bug,请参阅问题报告模版

-

如果您正在报告功能要求,请参阅问题报告模版

-

如果您正在报告常规问题,比如提出一个问题,则可以打开常规问题

-

发送 pull request

-
    -
  • 参考pull request template
  • -
  • 在您发送pull request之前,请同步您的github仓库和远程仓库,这会使您的pull request简单明了,具体操作请看如下所示步骤:
  • -
-
git remote add upstream git@github.com:seata/seata.git
-git fetch upstream
-git rebase upstream/master
-git checkout -b your_awesome_patch
-... add some work
-git push origin your_awesome_patch
-
-

编码规范

-

请按照CONTRIBUTING.md中的编码规范对自己的代码进行检查。

-
- - - - - - - diff --git a/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.json b/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.json deleted file mode 100644 index 54595364..00000000 --- a/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "new-contributor-guide_dev.md", - "__html": "

新贡献者向导

\n

这篇向导旨在给正在准备向Seata提交贡献的新手提供指导。

\n

邮件列表描述

\n

TBD

\n

报告问题

\n

您始终可以通过Github Issues 向Seata报告问题。

\n

如果您正在报告bug,请参阅问题报告模版

\n

如果您正在报告功能要求,请参阅问题报告模版

\n

如果您正在报告常规问题,比如提出一个问题,则可以打开常规问题

\n

发送 pull request

\n
    \n
  • 参考pull request template
  • \n
  • 在您发送pull request之前,请同步您的github仓库和远程仓库,这会使您的pull request简单明了,具体操作请看如下所示步骤:
  • \n
\n
git remote add upstream git@github.com:seata/seata.git\ngit fetch upstream\ngit rebase upstream/master\ngit checkout -b your_awesome_patch\n... add some work\ngit push origin your_awesome_patch\n
\n

编码规范

\n

请按照CONTRIBUTING.md中的编码规范对自己的代码进行检查。

\n", - "link": "/zh-cn/docs/developers/contributor-guide/new-contributor-guide_dev.html", - "meta": { - "title": "新贡献者向导", - "keywords": "Seata", - "description": "这篇向导旨在给正在准备向Seata提交贡献的新手提供指导。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html b/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html deleted file mode 100644 index 0e00b3e5..00000000 --- a/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - 报告安全问题 - - - - -
文档

报告安全问题

-

Seata对消除其软件项目中的安全问题持严格的立场,对与其功能和特性有关的安全问题非常敏感,并会尽快处理。

-

报告漏洞

-

如果您对Seata的安全性有担忧,或者发现了漏洞或潜在威胁,请发送电子邮件至dev-seata@googlegroups.com与Seata安全团队联系。请在邮件中描述问题或潜在威胁,并尽量提供重现问题的方法。Seata社区会在评估和分析之后与您联系。请务必先通过上述安全邮件报告安全问题,再考虑在公开渠道披露。

-

漏洞处理

-

漏洞处理过程的概述是:

-
    -
  • 报告者将漏洞秘密报告给Seata。
  • -
  • 相应项目的安全团队与报告者私下合作来解决漏洞。
  • -
  • 制作了包含该修复程序的有关Seata产品的新版本。
  • -
  • 该漏洞已公开宣布。
  • -
-
- - - - - - - diff --git a/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.json b/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.json deleted file mode 100644 index e6459ee0..00000000 --- a/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "reporting-security-issues_dev.md", - "__html": "

报告安全问题

\n

Seata对消除其软件项目中的安全问题持严格的立场,对与其功能和特性有关的安全问题非常敏感,并会尽快处理。

\n

报告漏洞

\n

如果您对Seata的安全性有担忧,或者发现了漏洞或潜在威胁,请发送电子邮件至dev-seata@googlegroups.com与Seata安全团队联系。请在邮件中描述问题或潜在威胁,并尽量提供重现问题的方法。Seata社区会在评估和分析之后与您联系。请务必先通过上述安全邮件报告安全问题,再考虑在公开渠道披露。

\n

漏洞处理

\n

漏洞处理过程的概述是:

\n
    \n
  • 报告者将漏洞秘密报告给Seata。
  • \n
  • 相应项目的安全团队与报告者私下合作来解决漏洞。
  • \n
  • 制作了包含该修复程序的有关Seata产品的新版本。
  • \n
  • 该漏洞已公开宣布。
  • \n
\n", - "link": "/zh-cn/docs/developers/contributor-guide/reporting-security-issues_dev.html", - "meta": { - "title": "报告安全问题", - "keywords": "Seata", - "description": "Seata在消除其软件项目中的安全性问题方面采取严格的立场,对与其功能和特性有关的问题非常敏感并很快提出。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html b/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html deleted file mode 100644 index 1da5d385..00000000 --- a/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - 测试覆盖率向导 - - - - -
文档

测试覆盖率向导

-

1.写单元测试的收益

-
    -
  • 单元测试能帮助每个人深入代码细节,了解代码的功能。
  • -
  • 通过测试用例我们能发现bug,并提升代码的健壮性。
  • -
  • 测试用例同时也是代码的demo用法。
  • -
-

2.单元测试用例的一些设计原则

-
    -
  • 应该精心设计好步骤,颗粒度和组合条件。
  • -
  • 注意边界条件。
  • -
  • 单元测试也应该好好设计,不要写无用的代码。
  • -
  • 当你发现一个方法很难写单元测试时,如果可以确认这个方法是臭代码,那么就和开发者一起重构它。
  • -
  • Seata中用的mock框架是: mockito. 下面是一些开发向导:mockito tutorial,mockito refcard(一个最小的单元测试示例见本节列表之后)
  • -
  • TDD(可选):当你开始写一个新的功能时,你可以试着先写测试用例。
  • -
-
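下面是一个使用 JUnit 5 + Mockito 的最小单元测试示意(被测类与方法均为虚构,仅演示 mock 外部依赖和校验边界条件的基本写法):
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.jupiter.api.Test;

// 虚构的被测逻辑:根据远端查询到的余额判断能否扣款
class BalanceService {
    interface BalanceClient { int queryBalance(String userId); }

    private final BalanceClient client;
    BalanceService(BalanceClient client) { this.client = client; }

    boolean canDeduct(String userId, int amount) {
        return client.queryBalance(userId) >= amount;
    }
}

class BalanceServiceTest {

    @Test
    void canDeductWhenBalanceIsEnough() {
        // mock 掉外部依赖,只验证本类的判断逻辑
        BalanceService.BalanceClient client = mock(BalanceService.BalanceClient.class);
        when(client.queryBalance("u1")).thenReturn(100);

        BalanceService service = new BalanceService(client);
        assertTrue(service.canDeduct("u1", 100));    // 边界条件:恰好等于余额
        assertFalse(service.canDeduct("u1", 101));
    }
}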

3.测试覆盖率设定值

-
    -
  • 在现阶段,Delta更改代码的测试覆盖设定值为:>=80%,越高越好。
  • -
  • 我们可以在这个页面中看到测试报告: https://codecov.io/gh/seata/seata
  • -
-
- - - - - - - diff --git a/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.json b/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.json deleted file mode 100644 index 5659fb48..00000000 --- a/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "test-coverage-guide_dev.md", - "__html": "

测试覆盖率向导

\n

1.写单元测试的收益

\n
    \n
  • 单元测试能帮助每个人深入代码细节,了解代码的功能。
  • \n
  • 通过测试用例我们能发现bug,并提升代码的健壮性。
  • \n
  • 测试用例同时也是代码的demo用法。
  • \n
\n

2.单元测试用例的一些设计原则

\n
    \n
  • 应该精心设计好步骤,颗粒度和组合条件。
  • \n
  • 注意边界条件。
  • \n
  • 单元测试也应该好好设计,不要写无用的代码。
  • \n
  • 当你发现一个方法很难写单元测试时,如果可以确认这个方法是臭代码,那么就和开发者一起重构它。
  • \n
  • Seata中用的mock框架是: mockito. 下面是一些开发向导:mockito tutorial,mockito refcard
  • \n
  • TDD(可选):当你开始写一个新的功能时,你可以试着先写测试用例。
  • \n
\n

3.测试覆盖率设定值

\n
    \n
  • 在现阶段,Delta更改代码的测试覆盖设定值为:>=80%,越高越好。
  • \n
  • 我们可以在这个页面中看到测试报告: https://codecov.io/gh/seata/seata
  • \n
\n", - "link": "/zh-cn/docs/developers/contributor-guide/test-coverage-guide_dev.html", - "meta": { - "title": "测试覆盖率向导", - "keywords": "Seata", - "description": "测试覆盖率向导。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/developers_dev.html b/zh-cn/docs/developers/developers_dev.html deleted file mode 100644 index 4007acea..00000000 --- a/zh-cn/docs/developers/developers_dev.html +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - Seata 维护者 - - - - -
文档

开发人员

-

本页面展示了Seata的开发团队。请通过提交PR的方式把自己的信息添加到列表上。注:排名不分先后

-

Seata Committer 列表

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
姓名github公司
季敏slievrlyAlibaba
申海强sharajavaAlibaba
厉启鹏wlliqipengAlibaba
陈连东purple-forceAlibaba
姜宇jiangyu-gtsAlibaba
张伟jezhang2014Alibaba
张森zhangthenAntfin
王光树wgs13579Antfin
章耿ujjboyAntfin
雷志远leizhiyuanAntfin
王清jovany-wangAntfin
吴江坷xingfudeshiTruthai
李钊CoffeeLatte007YuanFuDao
王欣lovepoemWeiDian
袁国尧github-ygyTuya
张旭zhangxu19830126InfiniVision
任帅鹏niaoshuaiHuanQiuYouLu
翟东林tony-zdlSprings Capital
申旭刚xuririseCNIC
许德佑skyesxWeBank
庄金雷zjinleiHelios
张嘉伟l81893521Locals
钟正涛jsbxyyxShenzhen arts
陈龙long187Antfin
-

Seata开发者角色

-

Seata开发者包含Maintainer、Committer、Contributor三种角色,每种角色的标准定义如下。

-

Maintainer

-

Maintainer是对Seata项目(包括Seata下的项目)的演进和发展做出显著贡献的个人。具体包含以下的标准:

-
    -
  • 完成多个关键模块或者工程的设计与开发,是项目的核心开发人员;
  • -
  • 持续的投入和激情,能够积极参与社区、官网、issue、PR等项目相关事项的维护;
  • -
  • 在社区中具有有目共睹的影响力,能够代表Seata参加重要的社区会议和活动;
  • -
  • 具有培养Committer和Contributor的意识和能力;
  • -
-

Committer

-

Committer是具有Seata仓库写权限的个人,包含以下的标准:

-
    -
  • 能够在长时间内做持续贡献issue、PR的个人;
  • -
  • 参与issue列表的维护及重要feature的讨论;
  • -
  • 参与code review;
  • -
-

Contributor

-

Contributor是对Seata项目有贡献的个人,标准为:

-
    -
  • 提交过PR并被合并;
  • -
-
- - - - - - - diff --git a/zh-cn/docs/developers/developers_dev.json b/zh-cn/docs/developers/developers_dev.json deleted file mode 100644 index b9372ed6..00000000 --- a/zh-cn/docs/developers/developers_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "developers_dev.md", - "__html": "

开发人员

\n

本页面展示了Seata的开发团队。请通过提交PR的方式把自己的信息添加到列表上。注:排名不分先后

\n

Seata Committer 列表

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
姓名github公司
季敏slievrlyAlibaba
申海强sharajavaAlibaba
厉启鹏wlliqipengAlibaba
陈连东purple-forceAlibaba
姜宇jiangyu-gtsAlibaba
张伟jezhang2014Alibaba
张森zhangthenAntfin
王光树wgs13579Antfin
章耿ujjboyAntfin
雷志远leizhiyuanAntfin
王清jovany-wangAntfin
吴江坷xingfudeshiTruthai
李钊CoffeeLatte007YuanFuDao
王欣lovepoemWeiDian
袁国尧github-ygyTuya
张旭zhangxu19830126InfiniVision
任帅鹏niaoshuaiHuanQiuYouLu
翟东林tony-zdlSprings Capital
申旭刚xuririseCNIC
许德佑skyesxWeBank
庄金雷zjinleiHelios
张嘉伟l81893521Locals
钟正涛jsbxyyxShenzhen arts
陈龙long187Antfin
\n

Seata开发者角色

\n

Seata开发者包含Maintainer、Committer、Contributor三种角色,每种角色的标准定义如下。

\n

Maintainer

\n

Maintainer是对Seata项目(包括Seata下的项目)的演进和发展做出显著贡献的个人。具体包含以下的标准:

\n
    \n
  • 完成多个关键模块或者工程的设计与开发,是项目的核心开发人员;
  • \n
  • 持续的投入和激情,能够积极参与社区、官网、issue、PR等项目相关事项的维护;
  • \n
  • 在社区中具有有目共睹的影响力,能够代表Seata参加重要的社区会议和活动;
  • \n
  • 具有培养Committer和Contributor的意识和能力;
  • \n
\n

Committer

\n

Committer是具有Seata仓库写权限的个人,包含以下的标准:

\n
    \n
  • 能够在长时间内做持续贡献issue、PR的个人;
  • \n
  • 参与issue列表的维护及重要feature的讨论;
  • \n
  • 参与code review;
  • \n
\n

Contributor

\n

Contributor是对Seata项目有贡献的个人,标准为:

\n
    \n
  • 提交过PR并被合并;
  • \n
\n", - "link": "/zh-cn/docs/developers/developers_dev.html", - "meta": { - "title": "Seata 维护者", - "keywords": "Seata, 维护者", - "description": "Seata 维护者名单" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/developers_dev.md.html b/zh-cn/docs/developers/developers_dev.md.html deleted file mode 100644 index d3c505a1..00000000 --- a/zh-cn/docs/developers/developers_dev.md.html +++ /dev/null @@ -1,179 +0,0 @@ - - - - - - - - - - Seata 维护者 - - - - -
文档

Seata开发者角色

-

Seata开发者包含Maintainer、Committer、Contributor三种角色,每种角色的标准定义如下。

-

Maintainer

-

Maintainer是对Seata项目(包括Seata下的项目)的演进和发展做出显著贡献的个人。具体包含以下的标准:

-
    -
  • 完成多个关键模块或者工程的设计与开发,是项目的核心开发人员;
  • -
  • 持续的投入和激情,能够积极参与社区、官网、issue、PR等项目相关事项的维护;
  • -
  • 在社区中具有有目共睹的影响力,能够代表Seata参加重要的社区会议和活动;
  • -
  • 具有培养Committer和Contributor的意识和能力;
  • -
-

Committer

-

Committer是具有Seata仓库写权限的个人,包含以下的标准:

-
    -
  • 能够在长时间内做持续贡献issue、PR的个人;
  • -
  • 参与issue列表的维护及重要feature的讨论;
  • -
  • 参与code review;
  • -
-

Contributor

-

Contributor是对Seata项目有贡献的个人,标准为:

-
    -
  • 提交过PR并被合并;
  • -
-

开发人员

-

本页面展示了Seata的开发团队。请通过提交PR的方式把自己的信息添加到列表上。注:排名不分先后

-

Seata 维护者

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
姓名github公司
季敏slievrlyAlibaba
申海强sharajavaAlibaba
厉启鹏wlliqipengAlibaba
陈连东purple-forceAlibaba
姜宇jiangyu-gtsAlibaba
张伟jezhang2014Alibaba
张森zhangthenAntfin
王光树wgs13579Antfin
章耿ujjboyAntfin
雷志远leizhiyuanAntfin
王清jovany-wangAntfin
吴江坷xingfudeshiTruthai
李钊CoffeeLatte007YuanFuDao
王欣lovepoemWeiDian
袁国尧github-ygyTuya
张旭zhangxu19830126InfiniVision
任帅鹏niaoshuaiHuanQiuYouLu
翟东林tony-zdlSprings Capital
申旭刚xuririseCNIC
许德佑skyesxWeBank
庄金雷zjinleiHelios
张嘉伟l81893521Locals
钟正涛jsbxyyxShenzhen arts
陈龙long187Antfin
-
- - - - - - diff --git a/zh-cn/docs/developers/developers_dev.md.json b/zh-cn/docs/developers/developers_dev.md.json deleted file mode 100644 index 99afb7ea..00000000 --- a/zh-cn/docs/developers/developers_dev.md.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "developers_dev.md.md", - "__html": "

Seata开发者角色

\n

Seata开发者包含Maintainer、Committer、Contributor三种角色,每种角色的标准定义如下。

\n

Maintainer

\n

Maintainer是对Seata项目(包括Seata下的项目)的演进和发展做出显著贡献的个人。具体包含以下的标准:

\n
    \n
  • 完成多个关键模块或者工程的设计与开发,是项目的核心开发人员;
  • \n
  • 持续的投入和激情,能够积极参与社区、官网、issue、PR等项目相关事项的维护;
  • \n
  • 在社区中具有有目共睹的影响力,能够代表Seata参加重要的社区会议和活动;
  • \n
  • 具有培养Committer和Contributor的意识和能力;
  • \n
\n

Committer

\n

Committer是具有Seata仓库写权限的个人,包含以下的标准:

\n
    \n
  • 能够在长时间内做持续贡献issue、PR的个人;
  • \n
  • 参与issue列表的维护及重要feature的讨论;
  • \n
  • 参与code review;
  • \n
\n

Contributor

\n

Contributor是对Seata项目有贡献的个人,标准为:

\n
    \n
  • 提交过PR并被合并;
  • \n
\n

开发人员

\n

本页面展示了Seata的开发团队。请通过提交PR的方式把自己的信息添加到列表上。注:排名不分先后

\n

Seata 维护者

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
姓名github公司
季敏slievrlyAlibaba
申海强sharajavaAlibaba
厉启鹏wlliqipengAlibaba
陈连东purple-forceAlibaba
姜宇jiangyu-gtsAlibaba
张伟jezhang2014Alibaba
张森zhangthenAntfin
王光树wgs13579Antfin
章耿ujjboyAntfin
雷志远leizhiyuanAntfin
王清jovany-wangAntfin
吴江坷xingfudeshiTruthai
李钊CoffeeLatte007YuanFuDao
王欣lovepoemWeiDian
袁国尧github-ygyTuya
张旭zhangxu19830126InfiniVision
任帅鹏niaoshuaiHuanQiuYouLu
翟东林tony-zdlSprings Capital
申旭刚xuririseCNIC
许德佑skyesxWeBank
庄金雷zjinleiHelios
张嘉伟l81893521Locals
钟正涛jsbxyyxShenzhen arts
陈龙long187Antfin
\n", - "link": "/zh-cn/docs/developers/developers_dev.md.html", - "meta": { - "title": "Seata 维护者", - "keywords": "Seata, 维护者", - "description": "Seata 维护者名单" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/guide_dev.html b/zh-cn/docs/developers/guide_dev.html deleted file mode 100644 index 455273c9..00000000 --- a/zh-cn/docs/developers/guide_dev.html +++ /dev/null @@ -1,187 +0,0 @@ - - - - - - - - - - 为Seata贡献 - - - - -
文档

为Seata贡献

-

如果您有兴趣攻克Seata,欢迎您。首先,我们非常鼓励这种意愿。这是为您提供帮助的列表。

-

话题

- -

报告安全问题

-

安全问题应该始终得到认真对待。按照我们通常的原则,我们不鼓励任何人散布安全问题。如果您发现Seata的安全问题,请不要公开讨论,甚至不要公开问题。相反,我们建议您向我们发送一封私人电子邮件至 dev-seata@googlegroups.com进行举报。

-

报告一般问题

-

坦白地说,我们认为Seata的每位用户都是非常友好的贡献者。体验Seata之后,您可能会对项目有一些反馈。然后随时通过NEW ISSUE打开问题

-

因为我们以分布式的方式协作开发Seata项目,所以我们非常欢迎书写良好、内容详细、表达明确的问题报告。为了提高沟通效率,我们希望每个人在提问前先搜索自己的问题是否已在issue列表中。如果发现它已存在,请在现有issue下的评论中补充您的详细信息,而不要打开一个全新的issue。

-

为了使问题详细信息尽可能地标准,我们为问题报告者设置了“ 问题模板 ”。请务必按照说明填写模板中的字段。

-

在很多情况下,您可以打开一个问题:

-
    -
  • 错误报告
  • -
  • 功能要求
  • -
  • 性能问题
  • -
  • 功能提案
  • -
  • 功能设计
  • -
  • 需要帮助
  • -
  • doc不完整
  • -
  • 测试改进
  • -
  • 有关项目的任何问题
  • -
  • 等等
  • -
-

另外,我们必须提醒您,在填写新issue时,请记住从您的帖子中删除敏感数据。敏感数据可以是密码,密钥,网络位置,私人业务数据等。

-

代码和文档贡献

-

鼓励采取一切措施使Seata项目变得更好。在GitHub上,Seata的每个改进都可以通过PR(拉取请求的缩写)来实现。

-
    -
  • 如果发现错字,请尝试解决!
  • -
  • 如果发现错误,请尝试修复它!
  • -
  • 如果发现一些冗余代码,请尝试将其删除!
  • -
  • 如果发现缺少一些测试用例,请尝试添加它们!
  • -
  • 如果您可以增强功能,请不要犹豫!
  • -
  • 如果发现隐式代码,请尝试添加注释以使其清晰!
  • -
  • 如果您发现代码丑陋,请尝试重构它!
  • -
  • 如果可以帮助改善文档,那就再好不过了!
  • -
  • 如果发现文档不正确,请直接解决该问题!
  • -
  • ...
  • -
-

实际上,不可能完全列出它们。只要记住一个原则:

-
-

我们期待您的任何回复。

-
-

由于您已准备好通过PR改善Seata,因此建议您在此处查看PR规则。

- -

工作准备

-

要提出PR,我们假设您已经注册了GitHub ID。然后,您可以按照以下步骤完成准备工作:

-
    -
  1. -

    FORK Seata仓库到您自己的存储库。要完成这一步,您只需要单击 seata/seata 主页右边的 Fork 按钮。然后,您将在 https://github.com/<your-username>/seata 找到您的存储库,其中 your-username 是您的GitHub用户名。

    -
  2. -
  3. -

    CLONE您自己的存储库以在本地进行开发。用于git clone git@github.com:<your-username>/seata.git将存储库克隆到本地计算机。然后,您可以创建新分支来完成您希望进行的更改。

    -
  4. -
  5. -

    Set Remote上游设置为git@github.com:seata/seata.git使用以下两个命令:

    -
  6. -
-
git remote add upstream git@github.com:seata/seata.git
-git remote set-url --push upstream no-pushing
-
-
-

使用此远程设置,您可以像这样检查git远程配置:

-
$ git remote -v
-origin     git@github.com:<your-username>/seata.git (fetch)
-origin     git@github.com:<your-username>/seata.git (push)
-upstream   git@github.com:seata/seata.git (fetch)
-upstream   no-pushing (push)
-
-
-

加上这一点,我们可以很容易地将本地分支与上游分支同步。

-

分支定义

-

现在,我们假设通过拉取请求所做的所有贡献都是针对Seata中的分支发展。在做出贡献之前,了解分支定义会有所帮助。

-

作为贡献者,请再次记住,通过拉取请求进行的每个贡献都是为了分支发展。在Seata项目中,还有其他几个分支,我们通常称它们为发布分支(例如0.6.0、0.6.1),功能分支,修补程序分支和主分支。

-

正式发布版本时,将有一个发布分支,并以版本号命名。

-

发布之后,我们将发布分支的提交合并到master分支中。

-

当发现某个版本中存在错误时,我们将决定在更高版本中进行修复或在特定修补程序版本中进行修复。当我们决定修复此修补程序版本时,我们将根据相应的发行分支检出该修补程序分支,执行代码修复和验证,然后将其合并到开发分支和master分支中。

-

对于更大的功能,我们将拉出功能分支以进行开发和验证。

-

提交规则

-

实际上,在Seata中,我们在提交时要认真对待两个规则:

- -

提交讯息

-

提交消息可以帮助审稿人更好地了解提交的PR的目的。它也可以帮助加快代码审查过程。我们鼓励贡献者清楚明白提交消息而不是模棱两可的消息。通常,我们提倡以下提交消息类型:

-
    -
  • docs:xxxx。例如,“ docs:添加有关Seata群集安装的文档”。
  • -
  • feature:xxxx。例如,“新功能:在AT模式下支持oracle”。
  • -
  • bugfix:xxxx。例如,“错误修正:修正了输入nil参数时的错误”。
  • -
  • refactor:xxxx。例如,“重构:简化以使代码更具可读性”。
  • -
  • test:xxx。例如,“测试:为func InsertIntoArray添加单元测试用例”。
  • -
  • 其他可读和显式的表达方式。
  • -
-

另一方面,我们不鼓励捐助者像以下方式提交消息:

-
    -
  • 修正错误
  • -
  • 更新
  • -
  • 添加文档
  • -
-

如果您迷路了,请参阅《如何编写Git提交消息》作为开始。

-

提交内容

-

提交内容表示一次提交中包含的所有内容更改。我们最好将内容包含在一个提交中,这样可以在没有任何其他提交帮助的情况下支持审阅者的完整审阅。换句话说,一次提交中的内容可以传递CI以避免代码混乱。简而言之,我们要记住三个小规则:

-
    -
  • 避免在提交中进行很大的更改;
  • -
  • 每次提交均完整且可审查。
  • -
  • 提交时检查git config(user.nameuser.email)以确保它与您的github ID相关联。
  • -
-

另外,在代码更改部分,我们建议所有贡献者都应阅读Seata代码样式

-

无论提交消息还是提交内容,我们都更加注重代码审查。

-

PR 说明

-

PR是更改Seata项目文件的唯一方法。为了帮助审稿人更好地实现目标,PR 说明不能太详细。我们鼓励贡献者遵循PR模板完成请求请求。

-

测试用例贡献

-

任何测试用例都将受到欢迎。当前,Seata功能测试用例是高度优先的。

-
    -
  • -

    对于单元测试,您需要在同一模块的测试目录中创建一个命名为xxxTest.java的测试文件。推荐您使用junit5 UT框架。

    -
  • -
  • -

    对于集成测试,您可以将集成测试放在测试目录或seata-test模块中。建议使用 mockito 测试框架。

    -
  • -
-

致力于帮助任何事情

-

我们选择GitHub作为Seata合作的主要场所。因此,Seata的最新更新始终在这里。尽管通过PR捐款是一种明确的帮助方式,但我们仍然呼吁其他方式。

-
    -
  • 如果可以的话,回复他人的问题;
  • -
  • 帮助解决其他用户的问题;
  • -
  • 帮助审查他人的PR设计;
  • -
  • 帮助审查PR中其他人的代码;
  • -
  • 讨论有关Seata的问题,以使事情更加清晰;
  • -
  • 在GitHub之外倡导Seata技术;
  • -
  • 在Seata上写博客,等等。
  • -
-

代码风格

-

Seata代码样式符合阿里巴巴Java编码准则。

-

指导方针

-

阿里巴巴Java编码指南

-

IDE插件安装(不必要)

-

该插件并非必须安装;如果希望在编码时就能发现问题,可以安装它。

-

idea IDE

-

p3c-idea-plugin-install

-

eclipse IDE

-

p3c-eclipse-plugin-install -总之,任何帮助都是贡献。

-
- - - - - - - diff --git a/zh-cn/docs/developers/guide_dev.json b/zh-cn/docs/developers/guide_dev.json deleted file mode 100644 index 4affd81b..00000000 --- a/zh-cn/docs/developers/guide_dev.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "guide_dev.md", - "__html": "

为Seata贡献

\n

如果您有兴趣攻克Seata,欢迎您。首先,我们非常鼓励这种意愿。这是为您提供帮助的列表。

\n

话题

\n\n

报告安全问题

\n

安全问题应该始终得到认真对待。按照我们通常的原则,我们不鼓励任何人散布安全问题。如果您发现Seata的安全问题,请不要公开讨论,甚至不要公开问题。相反,我们建议您向我们发送一封私人电子邮件至 dev-seata@googlegroups.com进行举报。

\n

报告一般问题

\n

坦白地说,我们认为Seata的每位用户都是非常友好的贡献者。体验Seata之后,您可能会对项目有一些反馈。然后随时通过NEW ISSUE打开问题

\n

因为我们以分布式的方式协作开发Seata项目,所以我们非常欢迎书写良好、内容详细、表达明确的问题报告。为了提高沟通效率,我们希望每个人在提问前先搜索自己的问题是否已在issue列表中。如果发现它已存在,请在现有issue下的评论中补充您的详细信息,而不要打开一个全新的issue。

\n

为了使问题详细信息尽可能地标准,我们为问题报告者设置了“ 问题模板 ”。请务必按照说明填写模板中的字段。

\n

在很多情况下,您可以打开一个问题:

\n
    \n
  • 错误报告
  • \n
  • 功能要求
  • \n
  • 性能问题
  • \n
  • 功能提案
  • \n
  • 功能设计
  • \n
  • 需要帮助
  • \n
  • doc不完整
  • \n
  • 测试改进
  • \n
  • 有关项目的任何问题
  • \n
  • 等等
  • \n
\n

另外,我们必须提醒您,在填写新issue时,请记住从您的帖子中删除敏感数据。敏感数据可以是密码,密钥,网络位置,私人业务数据等。

\n

代码和文档贡献

\n

鼓励采取一切措施使Seata项目变得更好。在GitHub上,Seata的每个改进都可以通过PR(拉取请求的缩写)来实现。

\n
    \n
  • 如果发现错字,请尝试解决!
  • \n
  • 如果发现错误,请尝试修复它!
  • \n
  • 如果发现一些冗余代码,请尝试将其删除!
  • \n
  • 如果发现缺少一些测试用例,请尝试添加它们!
  • \n
  • 如果您可以增强功能,请不要犹豫!
  • \n
  • 如果发现隐式代码,请尝试添加注释以使其清晰!
  • \n
  • 如果您发现代码丑陋,请尝试重构它!
  • \n
  • 如果可以帮助改善文档,那就再好不过了!
  • \n
  • 如果发现文档不正确,请直接解决该问题!
  • \n
  • ...
  • \n
\n

实际上,不可能完全列出它们。只要记住一个原则:

\n
\n

我们期待您的任何回复。

\n
\n

由于您已准备好通过PR改善Seata,因此建议您在此处查看PR规则。

\n\n

工作准备

\n

要提出PR,我们假设您已经注册了GitHub ID。然后,您可以按照以下步骤完成准备工作:

\n
    \n
  1. \n

    FORK Seata仓库到您自己的存储库。要完成这一步,您只需要单击 seata/seata 主页右边的 Fork 按钮。然后,您将在 https://github.com/<your-username>/seata 找到您的存储库,其中 your-username 是您的GitHub用户名。

    \n
  2. \n
  3. \n

    CLONE您自己的存储库以在本地进行开发。用于git clone git@github.com:<your-username>/seata.git将存储库克隆到本地计算机。然后,您可以创建新分支来完成您希望进行的更改。

    \n
  4. \n
  5. \n

    Set Remote上游设置为git@github.com:seata/seata.git使用以下两个命令:

    \n
  6. \n
\n
git remote add upstream git@github.com:seata/seata.git\ngit remote set-url --push upstream no-pushing\n\n
\n

使用此远程设置,您可以像这样检查git远程配置:

\n
$ git remote -v\norigin     git@github.com:<your-username>/seata.git (fetch)\norigin     git@github.com:<your-username>/seata.git (push)\nupstream   git@github.com:seata/seata.git (fetch)\nupstream   no-pushing (push)\n\n
\n

加上这一点,我们可以很容易地将本地分支与上游分支同步。

\n

分支定义

\n

现在,我们假设通过拉取请求所做的所有贡献都是针对Seata中的分支发展。在做出贡献之前,了解分支定义会有所帮助。

\n

作为贡献者,请再次记住,通过拉取请求进行的每个贡献都是为了分支发展。在Seata项目中,还有其他几个分支,我们通常称它们为发布分支(例如0.6.0、0.6.1),功能分支,修补程序分支和主分支。

\n

正式发布版本时,将有一个发布分支,并以版本号命名。

\n

发布之后,我们将发布分支的提交合并到master分支中。

\n

当发现某个版本中存在错误时,我们将决定在更高版本中进行修复或在特定修补程序版本中进行修复。当我们决定修复此修补程序版本时,我们将根据相应的发行分支检出该修补程序分支,执行代码修复和验证,然后将其合并到开发分支和master分支中。

\n

对于更大的功能,我们将拉出功能分支以进行开发和验证。

\n

提交规则

\n

实际上,在Seata中,我们在提交时要认真对待两个规则:

\n\n

提交讯息

\n

提交消息可以帮助审稿人更好地了解提交的PR的目的。它也可以帮助加快代码审查过程。我们鼓励贡献者清楚明白提交消息而不是模棱两可的消息。通常,我们提倡以下提交消息类型:

\n
    \n
  • docs:xxxx。例如,“ docs:添加有关Seata群集安装的文档”。
  • \n
  • feature:xxxx。例如,“新功能:在AT模式下支持oracle”。
  • \n
  • bugfix:xxxx。例如,“错误修正:修正了输入nil参数时的错误”。
  • \n
  • refactor:xxxx。例如,“重构:简化以使代码更具可读性”。
  • \n
  • test:xxx。例如,“测试:为func InsertIntoArray添加单元测试用例”。
  • \n
  • 其他可读和显式的表达方式。
  • \n
\n

另一方面,我们不鼓励捐助者像以下方式提交消息:

\n
    \n
  • 修正错误
  • \n
  • 更新
  • \n
  • 添加文档
  • \n
\n

如果您迷路了,请参阅《如何编写Git提交消息》作为开始。

\n

提交内容

\n

提交内容表示一次提交中包含的所有内容更改。我们最好将内容包含在一个提交中,这样可以在没有任何其他提交帮助的情况下支持审阅者的完整审阅。换句话说,一次提交中的内容可以传递CI以避免代码混乱。简而言之,我们要记住三个小规则:

\n
    \n
  • 避免在提交中进行很大的更改;
  • \n
  • 每次提交均完整且可审查。
  • \n
  • 提交时检查git config(user.nameuser.email)以确保它与您的github ID相关联。
  • \n
\n

另外,在代码更改部分,我们建议所有贡献者都应阅读Seata代码样式

\n

无论提交消息还是提交内容,我们都更加注重代码审查。

\n

PR 说明

\n

PR是更改Seata项目文件的唯一方法。为了帮助审稿人更好地实现目标,PR 说明不能太详细。我们鼓励贡献者遵循PR模板完成请求请求。

\n

测试用例贡献

\n

任何测试用例都将受到欢迎。当前,Seata功能测试用例是高度优先的。

\n
    \n
  • \n

    对于单元测试,您需要在同一模块的测试目录中创建一个命名为xxxTest.java的测试文件。推荐您使用junit5 UT框架。

    \n
  • \n
  • \n

    对于集成测试,您可以将集成测试放在测试目录或seata-test模块中。建议使用 mockito 测试框架。

    \n
  • \n
\n

致力于帮助任何事情

\n

我们选择GitHub作为Seata合作的主要场所。因此,Seata的最新更新始终在这里。尽管通过PR捐款是一种明确的帮助方式,但我们仍然呼吁其他方式。

\n
    \n
  • 如果可以的话,回复他人的问题;
  • \n
  • 帮助解决其他用户的问题;
  • \n
  • 帮助审查他人的PR设计;
  • \n
  • 帮助审查PR中其他人的代码;
  • \n
  • 讨论有关Seata的问题,以使事情更加清晰;
  • \n
  • 在GitHub之外倡导Seata技术;
  • \n
  • 在Seata上写博客,等等。
  • \n
\n

代码风格

\n

Seata代码样式符合阿里巴巴Java编码准则。

\n

指导方针

\n

阿里巴巴Java编码指南

\n

IDE插件安装(不必要)

\n

该插件并非必须安装;如果希望在编码时就能发现问题,可以安装它。

\n

idea IDE

\n

p3c-idea-plugin-install

\n

eclipse IDE

\n

p3c-eclipse-plugin-install\n总之,任何帮助都是贡献。

\n", - "link": "/zh-cn/docs/developers/guide_dev.html", - "meta": { - "title": "为Seata贡献", - "keywords": "Seata", - "description": "如果您有兴趣攻克Seata,欢迎您。首先,我们非常鼓励这种意愿。这是为您提供帮助的列表。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/developers/maintainers.html b/zh-cn/docs/developers/maintainers.html deleted file mode 100644 index 2fd7af09..00000000 --- a/zh-cn/docs/developers/maintainers.html +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - Seata 维护者 - - - - -
文档

Seata 维护者

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
GitHub ID姓名邮箱公司
allencloudAllen Sunallensun.shl@alibaba-inc.com阿里巴巴集团
chenchaobingChaobing Chenchenchaobing@126.com美图
garfield009Zuozheng Huzuozheng.hzz@alibaba-inc.com阿里巴巴集团
lowzjJin Zhangzj3142063@gmail.com阿里巴巴集团
wangj998Jian Wangmingzhi.wj@alibaba-inc.com阿里巴巴集团
zhouhaibing089Haibing Zhouzhouhaibing089@gmail.comeBay
-
- - - - - - diff --git a/zh-cn/docs/developers/maintainers.json b/zh-cn/docs/developers/maintainers.json deleted file mode 100644 index 6c70172b..00000000 --- a/zh-cn/docs/developers/maintainers.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "maintainers.md", - "__html": "

Seata 维护者

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
GitHub ID姓名邮箱公司
allencloudAllen Sunallensun.shl@alibaba-inc.com阿里巴巴集团
chenchaobingChaobing Chenchenchaobing@126.com美图
garfield009Zuozheng Huzuozheng.hzz@alibaba-inc.com阿里巴巴集团
lowzjJin Zhangzj3142063@gmail.com阿里巴巴集团
wangj998Jian Wangmingzhi.wj@alibaba-inc.com阿里巴巴集团
zhouhaibing089Haibing Zhouzhouhaibing089@gmail.comeBay
\n", - "link": "/zh-cn/docs/developers/maintainers.html", - "meta": { - "title": "Seata 维护者", - "keywords": "Seata, 维护者", - "description": "Seata 维护者名单" - } -} \ No newline at end of file diff --git a/zh-cn/docs/faq.html b/zh-cn/docs/faq.html deleted file mode 100644 index d089d00b..00000000 --- a/zh-cn/docs/faq.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - faq - - - - -
文档

FAQ

-

TBD

-
- - - - - - diff --git a/zh-cn/docs/faq.json b/zh-cn/docs/faq.json deleted file mode 100644 index e02aae3b..00000000 --- a/zh-cn/docs/faq.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

FAQ

\n

TBD

\n", - "link": "/zh-cn/docs/faq.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/faq/faq.html b/zh-cn/docs/faq/faq.html deleted file mode 100644 index 6f80309e..00000000 --- a/zh-cn/docs/faq/faq.html +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - faq - - - - -
文档

常见问题

-

Q: java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)

-
A: undolog序列化配置为jackson时,jackson版本需要为2.9.9+
-
-

Q: seata高可用

-
A: 0.6版本开始支持,tc使用db模式共享全局事务会话信息,注册中心使用seata支持的第三方注册中心(非file方式)
-
-

Q: undo_log表log_status=1的记录

-
A: 场景:分支事务a注册TC后,a的本地事务提交前发生了全局事务回滚
-   后果:全局事务回滚成功,a资源被占用掉,产生了资源悬挂问题
-   防悬挂措施:a回滚时发现回滚undo还未插入,则插入一条log_status=1的undo记录,a本地事务(业务写操作sql和对应undo为一个本地事务)提交时会因为undo表主键冲突而提交失败。
-
-

Q: 隔离性

-
A: 因seata一阶段本地事务已提交,为防止其他事务脏读脏写需要加强隔离。
-    1.脏读 select语句加for update,代理方法增加@GlobalLock或@GlobalTransactional
-    2.脏写 必须使用@GlobalTransactional
-    注:如果你查询的业务的接口没有GlobalTransactional 包裹,也就是这个方法上压根没有分布式事务的需求,这时你可以在方法上标注@GlobalLock 注解,并且在查询语句上加 for update。
-        如果你查询的接口在事务链路上外层有GlobalTransactional注解,那么你查询的语句只要加for update就行。设计这个注解的原因是在没有这个注解之前,需要查询分布式事务读已提交的数据,但业务本身不需要分布式事务。
-        若使用GlobalTransactional注解就会增加一些没用的额外的rpc开销比如begin 返回xid,提交事务等。GlobalLock简化了rpc过程,使其做到更高的性能。
-
-
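下面给出一个 @GlobalLock 配合 select ... for update 的示意(实体、Mapper 与 SQL 均为虚构,注解包名以实际使用的 Seata 版本为准):
import java.math.BigDecimal;

import io.seata.spring.annotation.GlobalLock;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

// 虚构的 Mapper:查询语句加 for update,读取时会去检查全局锁
@Mapper
interface AccountMapper {
    @Select("select balance from account where id = #{id} for update")
    BigDecimal selectBalanceForUpdate(@Param("id") String id);
}

@Service
public class AccountQueryService {

    private final AccountMapper accountMapper;

    public AccountQueryService(AccountMapper accountMapper) {
        this.accountMapper = accountMapper;
    }

    // 本方法自身没有分布式事务需求,只需读到“分布式事务已提交”的数据:
    // 用 @GlobalLock + for update 代替 @GlobalTransactional,省去 begin/commit 等额外的 RPC 开销
    @GlobalLock
    @Transactional
    public BigDecimal queryCommittedBalance(String id) {
        return accountMapper.selectBalanceForUpdate(id);
    }
}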

Q: 脏数据回滚失败如何处理

-
A: 
-    1.脏数据需手动处理,根据日志提示修正数据或者将对应undo删除(可自定义实现FailureHandler做邮件通知或其他)
-    2.关闭回滚时undo镜像校验,不推荐该方案。
-    注:建议事前做好隔离保证无脏数据
-
-

Q: 分支事务注册时全局事务状态不是begin

-
A:  
-    异常:Could not register branch into global session xid = status = Rollbacked(还有Rollbacking、AsyncCommitting等等二阶段状态) while expecting Begin
-    描述:分支事务注册时,全局事务状态需是一阶段状态begin,非begin不允许注册。属于seata框架层面正常的处理,用户可以从自身业务层面解决。
-    有以下几种情况会出现该异常(可继续补充)
-    1.分支事务是异步,全局事务无法感知它的执行进度,全局事务已进入二阶段,该异步分支才来注册
-    2.服务a rpc 服务b超时(dubbo、feign等默认1秒超时),a上抛异常给tm,tm通知tc回滚,但是b还是收到了请求(网络延迟或rpc框架重试),然后去tc注册时发现全局事务已在回滚
-    3.tc感知全局事务超时(@GlobalTransactional(timeoutMills = 默认60秒)),主动变更状态并通知各分支事务回滚,此时有新的分支事务来注册
-
-

Q: Nacos 作为 Seata 配置中心时,项目启动报错找不到服务,如何排查,如何处理

-
A: 异常:io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server.
-    1.查看nacos配置列表,seata配置是否已经导入成功
-    2.查看nacos服务列表,serverAddr是否已经注册成功
-    3.检查client端的registry.conf里面的namespace,registry.nacos.namespace和config.nacos.namespace填入nacos的命名空间ID,默认"",server端和client端对应,namespace
-为public是nacos的一个保留的命名空间,如果您需要创建自己的namespace,最好不要和public重名,以一个实际业务场景有具体语义的名字来命名
-    4.nacos上服务列表,serverAddr地址对应ip地址应为seata启动指定ip地址,如:sh seata-server.sh -p 8091 -h 122.51.204.197 -m file
-    5.查看seata/conf/nacos-config.txt 事务分组service.vgroup_mapping.trade_group=default配置与项目分组配置名称是否一致
-    6.telnet ip 端口 查看端口是都开放,以及防火墙状态
-    注:1. 0.8.0版本启动指定ip问题,出现异常Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot assign requested address,请升级到0.8.1以上版本
-       2.项目使用jdk13,启动出现Error: Could not create the Java Virtual Machine.
-                              Error: A fatal exception has occurred. Program will exit.
-        如环境为sh,替换脚本中最后一段:
-        exec "$JAVACMD" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:
-MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="$BASEDIR"/logs
-/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+
-UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \
-          -classpath "$CLASSPATH" \
-          -Dapp.name="seata-server" \
-          -Dapp.pid="$$" \
-          -Dapp.repo="$REPO" \
-          -Dapp.home="$BASEDIR" \
-          -Dbasedir="$BASEDIR" \
-          io.seata.server.Server \
-          "$@"
-
-
- - - - - - diff --git a/zh-cn/docs/faq/faq.json b/zh-cn/docs/faq/faq.json deleted file mode 100644 index edaf17d0..00000000 --- a/zh-cn/docs/faq/faq.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

常见问题

\n

Q: java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)

\n
A: undolog序列化配置为jackson时,jackson版本需要为2.9.9+\n
\n

Q: seata高可用

\n
A: 0.6版本开始支持,tc使用db模式共享全局事务会话信息,注册中心使用非file的seata支持的第三方注册中心\n
\n

Q: undo_log表log_status=1的记录

\n
A: 场景:分支事务a注册TC后,a的本地事务提交前发生了全局事务回滚\n   后果:全局事务回滚成功,a资源被占用掉,产生了资源悬挂问题\n   防悬挂措施:a回滚时发现回滚undo还未插入,则插入一条log_status=1的undo记录,a本地事务(业务写操作sql和对应undo为一个本地事务)提交时会因为undo表主键冲突而提交失败。\n
\n

Q: 隔离性

\n
A: 因seata一阶段本地事务已提交,为防止其他事务脏读脏写需要加强隔离。\n    1.脏读 select语句加for update,代理方法增加@GlobalLock或@GlobalTransaction\n    2.脏写 必须使用@GlobalTransaction\n    注:如果你查询的业务的接口没有GlobalTransactional 包裹,也就是这个方法上压根没有分布式事务的需求,这时你可以在方法上标注@GlobalLock 注解,并且在查询语句上加 for update。\n        如果你查询的接口在事务链路上外层有GlobalTransactional注解,那么你查询的语句只要加for update就行。设计这个注解的原因是在没有这个注解之前,需要查询分布式事务读已提交的数据,但业务本身不需要分布式事务。\n        若使用GlobalTransactional注解就会增加一些没用的额外的rpc开销比如begin 返回xid,提交事务等。GlobalLock简化了rpc过程,使其做到更高的性能。\n
\n

Q: 脏数据回滚失败如何处理

\n
A: \n    1.脏数据需手动处理,根据日志提示修正数据或者将对应undo删除(可自定义实现FailureHandler做邮件通知或其他)\n    2.关闭回滚时undo镜像校验,不推荐该方案。\n    注:建议事前做好隔离保证无脏数据\n
\n

Q: 分支事务注册时全局事务状态不是begin

\n
A:  \n    异常:Could not register branch into global session xid = status = Rollbacked(还有Rollbacking、AsyncCommitting等等二阶段状态) while expecting Begin\n    描述:分支事务注册时,全局事务状态需是一阶段状态begin,非begin不允许注册。属于seata框架层面正常的处理,用户可以从自身业务层面解决。\n    有以下几种情况会出现该异常(可继续补充)\n    1.分支事务是异步,全局事务无法感知它的执行进度,全局事务已进入二阶段,该异步分支才来注册\n    2.服务a rpc 服务b超时(dubbo、feign等默认1秒超时),a上抛异常给tm,tm通知tc回滚,但是b还是收到了请求(网络延迟或rpc框架重试),然后去tc注册时发现全局事务已在回滚\n    3.tc感知全局事务超时(@GlobalTransactional(timeoutMills = 默认60秒)),主动变更状态并通知各分支事务回滚,此时有新的分支事务来注册\n
\n

Q: Nacos 作为 Seata 配置中心时,项目启动报错找不到服务,如何排查,如何处理

\n
A: 异常:io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server.\n    1.查看nacos配置列表,seata配置是否已经导入成功\n    2.查看nacos服务列表,serverAddr是否已经注册成功\n    3.检查client端的registry.conf里面的namespace,registry.nacos.namespace和config.nacos.namespace填入nacos的命名空间ID,默认"",server端和client端对应,namespace\n为public是nacos的一个保留控件,如果您需要创建自己的namespace,最好不要和public重名,以一个实际业务场景有具体语义的名字来命名\n    4.nacos上服务列表,serverAddr地址对应ip地址应为seata启动指定ip地址,如:sh seata-server.sh -p 8091 -h 122.51.204.197 -m file\n    5.查看seata/conf/nacos-config.txt 事务分组service.vgroup_mapping.trade_group=default配置与项目分组配置名称是否一致\n    6.telnet ip 端口 查看端口是都开放,以及防火墙状态\n    注:1.080版本启动指定ip问题,出现异常Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot addign request address,请升级到081以上版本\n       2.项目使用jdk13,启动出现Error: Could not create the Java Virtual Machine.\n                              Error: A fatal exception has occurred. Program will exit.\n        如环境为sh,替换脚本中最后一段:\n        exec "$JAVACMD" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:\nMaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="$BASEDIR"/logs\n/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+\nUseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \\\n          -classpath "$CLASSPATH" \\\n          -Dapp.name="seata-server" \\\n          -Dapp.pid="$$" \\\n          -Dapp.repo="$REPO" \\\n          -Dapp.home="$BASEDIR" \\\n          -Dbasedir="$BASEDIR" \\\n          io.seata.server.Server \\\n          "$@"\n
\n", - "link": "/zh-cn/docs/faq/faq.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/ops/config-center.html b/zh-cn/docs/ops/config-center.html deleted file mode 100644 index 3db3486f..00000000 --- a/zh-cn/docs/ops/config-center.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - 配置中心初始化 - - - - -
文档

配置中心初始化

-
- - - - - - - diff --git a/zh-cn/docs/ops/config-center.json b/zh-cn/docs/ops/config-center.json deleted file mode 100644 index 89e97946..00000000 --- a/zh-cn/docs/ops/config-center.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "config-center.md", - "__html": "

配置中心初始化

\n", - "link": "/zh-cn/docs/ops/config-center.html", - "meta": { - "title": "配置中心初始化", - "keywords": "Seata", - "description": "配置中心初始化。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/deploy-by-docker.html b/zh-cn/docs/ops/deploy-by-docker.html deleted file mode 100644 index 5f454183..00000000 --- a/zh-cn/docs/ops/deploy-by-docker.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - - 使用 Docker 部署 Seata Server - - - - -
文档

使用 Docker 部署 Seata Server

-

快速开始

-

启动seata-server实例

-
$ docker run --name seata-server -p 8091:8091 seataio/seata-server:latest
-
-

指定自定义配置文件启动

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \
-        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \
-        seataio/seata-server
-
-

指定seata-server IP 启动

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_IP=192.168.1.1 \
-        seataio/seata-server
-
-

Docker compose 启动

-

docker-compose.yaml 示例

-
version: "3"
-services:
-  seata-server:
-    image: seataio/seata-server
-    hostname: seata-server
-    ports:
-      - "8091:8091"
-    environment:
-      - SEATA_PORT=8091
-      - STORE_MODE=file
-
-

容器命令行及查看日志

-
$ docker exec -it seata-server sh
-
-
$ tail -f /root/logs/seata/seata-server.log
-
-

使用自定义配置文件

-

默认的配置文件路径为 /seata-server/resources, 建议将自定义配置文件放到其他目录下; 使用自定义配置文件时必须指定环境变量 SEATA_CONFIG_NAME, 并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

-
$ docker run --name seata-server \
-        -p 8091:8091 \
-        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \
-        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \
-        seataio/seata-server
-
-

环境变量

-

seata-server 支持以下环境变量:

-
    -
  • SEATA_IP
  • -
-
-

可选, 指定seata-server启动的IP, 该IP用于向注册中心注册时使用, 如eureka等

-
-
    -
  • SEATA_PORT
  • -
-
-

可选, 指定seata-server启动的端口, 默认为 8091

-
-
    -
  • STORE_MODE
  • -
-
-

可选, 指定seata-server的事务日志存储方式, 支持dbfile, 默认是 file

-
-
    -
  • SERVER_NODE
  • -
-
-

可选, 用于指定seata-server节点ID, 如 1,2,3..., 默认为 1

-
-
    -
  • SEATA_ENV
  • -
-
-

可选, 指定 seata-server 运行环境, 如 dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置

-
-
    -
  • SEATA_CONFIG_NAME
  • -
-
-

可选, 指定配置文件位置, 如 file:/root/registry, 将会加载 /root/registry.conf 作为配置文件

-
-
- - - - - - - diff --git a/zh-cn/docs/ops/deploy-by-docker.json b/zh-cn/docs/ops/deploy-by-docker.json deleted file mode 100644 index fe7c6d60..00000000 --- a/zh-cn/docs/ops/deploy-by-docker.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-docker.md", - "__html": "

使用 Docker 部署 Seata Server

\n

快速开始

\n

启动seata-server实例

\n
$ docker run --name seata-server -p 8091:8091 seataio/seata-server:latest\n
\n

指定自定义配置文件启动

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \\\n        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \\\n        seataio/seata-server\n
\n

指定seata-server IP 启动

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_IP=192.168.1.1 \\\n        seataio/seata-server\n
\n

Docker compose 启动

\n

docker-compose.yaml 示例

\n
version: \"3\"\nservices:\n  seata-server:\n    image: seataio/seata-server\n    hostname: seata-server\n    ports:\n      - \"8091:8091\"\n    environment:\n      - SEATA_PORT=8091\n      - STORE_MODE=file\n
\n

容器命令行及查看日志

\n
$ docker exec -it seata-server sh\n
\n
$ tail -f /root/logs/seata/seata-server.log\n
\n

使用自定义配置文件

\n

默认的配置文件路径为 /seata-server/resources, 建议将自定义配置文件放到其他目录下; 使用自定义配置文件时必须指定环境变量 SEATA_CONFIG_NAME, 并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

\n
$ docker run --name seata-server \\\n        -p 8091:8091 \\\n        -e SEATA_CONFIG_NAME=file:/root/seata-config/registry \\\n        -v /PATH/TO/CONFIG_FILE:/root/seata-config  \\\n        seataio/seata-server\n
\n

环境变量

\n

seata-server 支持以下环境变量:

\n
    \n
  • SEATA_IP
  • \n
\n
\n

可选, 指定seata-server启动的IP, 该IP用于向注册中心注册时使用, 如eureka等

\n
\n
    \n
  • SEATA_PORT
  • \n
\n
\n

可选, 指定seata-server启动的端口, 默认为 8091

\n
\n
    \n
  • STORE_MODE
  • \n
\n
\n

可选, 指定seata-server的事务日志存储方式, 支持dbfile, 默认是 file

\n
\n
    \n
  • SERVER_NODE
  • \n
\n
\n

可选, 用于指定seata-server节点ID, 如 1,2,3..., 默认为 1

\n
\n
    \n
  • SEATA_ENV
  • \n
\n
\n

可选, 指定 seata-server 运行环境, 如 dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置

\n
\n
    \n
  • SEATA_CONFIG_NAME
  • \n
\n
\n

可选, 指定配置文件位置, 如 file:/root/registry, 将会加载 /root/registry.conf 作为配置文件

\n
\n", - "link": "/zh-cn/docs/ops/deploy-by-docker.html", - "meta": { - "hidden": "true", - "title": "使用 Docker 部署 Seata Server", - "keywords": "docker,docker-compose,ops", - "description": "使用 Docker 部署 Seata Server", - "author": "helloworlde", - "date": "2019-11-25" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/deploy-by-helm.html b/zh-cn/docs/ops/deploy-by-helm.html deleted file mode 100644 index 1049d195..00000000 --- a/zh-cn/docs/ops/deploy-by-helm.html +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - - - - - 使用 Helm 部署 Seata Server - - - - -
文档

使用 Helm 部署 Seata Server

-

快速启动

-
$ cd ./script/server/helm/seata-server
-$ helm install seata-server ./seata-server
-
-

自定义配置

-

环境变量

-

支持的环境变量和 Docker 相同,可以参考 使用 Docker 部署 Seata Server

-

使用自定义配置文件

-

指定配置文件可以通过挂载的方式实现,如将/root/workspace/seata/seata-config/file 下的配置文件挂载到 pod 中,挂载后需要通过指定 SEATA_CONFIG_NAME 指定配置文件位置,并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

-
    -
  • Values.yaml
  • -
-
replicaCount: 1
-
-namespace: default
-
-image:
-  repository: seataio/seata-server
-  tag: latest
-  pullPolicy: IfNotPresent
-
-service:
-  type: NodePort
-  port: 30091
-
-env:
-  seataPort: "8091"
-  storeMode: "file"
-  seataIp: "127.0.0.1"
-  seataConfigName: "file:/root/seata-config/registry"
-
-volume:
-  - name: seata-config
-    mountPath: /root/seata-config
-    hostPath: /root/workspace/seata/seata-config/file
-
-
- - - - - - - diff --git a/zh-cn/docs/ops/deploy-by-helm.json b/zh-cn/docs/ops/deploy-by-helm.json deleted file mode 100644 index 0c5efd6d..00000000 --- a/zh-cn/docs/ops/deploy-by-helm.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-helm.md", - "__html": "

使用 Helm 部署 Seata Server

\n

快速启动

\n
$ cd ./script/server/helm/seata-server\n$ helm install seata-server ./seata-server\n
\n

自定义配置

\n

环境变量

\n

支持的环境变量和 Docker 相同,可以参考 使用 Docker 部署 Seata Server

\n

使用自定义配置文件

\n

指定配置文件可以通过挂载的方式实现,如将/root/workspace/seata/seata-config/file 下的配置文件挂载到 pod 中,挂载后需要通过指定 SEATA_CONFIG_NAME 指定配置文件位置,并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

\n
    \n
  • Values.yaml
  • \n
\n
replicaCount: 1\n\nnamespace: default\n\nimage:\n  repository: seataio/seata-server\n  tag: latest\n  pullPolicy: IfNotPresent\n\nservice:\n  type: NodePort\n  port: 30091\n\nenv:\n  seataPort: \"8091\"\n  storeMode: \"file\"\n  seataIp: \"127.0.0.1\"\n  seataConfigName: \"file:/root/seata-config/registry\"\n\nvolume:\n  - name: seata-config\n    mountPath: /root/seata-config\n    hostPath: /root/workspace/seata/seata-config/file\n
\n", - "link": "/zh-cn/docs/ops/deploy-by-helm.html", - "meta": { - "hidden": "true", - "title": "使用 Helm 部署 Seata Server", - "keywords": "kubernetes,helm,ops", - "description": "使用 Helm 部署 Seata Server", - "author": "helloworlde", - "date": "2019-12-01" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/deploy-by-kubernetes.html b/zh-cn/docs/ops/deploy-by-kubernetes.html deleted file mode 100644 index dc7d165b..00000000 --- a/zh-cn/docs/ops/deploy-by-kubernetes.html +++ /dev/null @@ -1,135 +0,0 @@ - - - - - - - - - - 使用 Kubernetes 部署 Seata Server - - - - -
文档

使用 Kubernetes 部署 Seata Server

-

快速启动

-

创建 seata-server.yaml

-
apiVersion: v1
-kind: Service
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  type: NodePort
-  ports:
-    - port: 8091
-      nodePort: 30091
-      protocol: TCP
-      name: http
-  selector:
-    k8s-app: seata-server
-
----
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      k8s-app: seata-server
-  template:
-    metadata:
-      labels:
-        k8s-app: seata-server
-    spec:
-      containers:
-        - name: seata-server
-          image: docker.io/seataio/seata-server:latest
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: SEATA_PORT
-              value: "8091"
-            - name: STORE_MODE
-              value: file
-          ports:
-            - name: http
-              containerPort: 8091
-              protocol: TCP
-
-
$ kubectl apply -f seata-server.yaml
-
-

自定义配置

-

环境变量

-

支持的环境变量和 Docker 相同,可以参考 使用 Docker 部署 Seata Server

-

使用自定义配置文件

-

指定配置文件可以通过挂载的方式实现,如将/root/workspace/seata/seata-config/file 下的配置文件挂载到pod 中,挂载后需要通过指定 SEATA_CONFIG_NAME 指定配置文件位置,并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

-
    -
  • Deployment
  • -
-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: seata-server
-  namespace: default
-  labels:
-    k8s-app: seata-server
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      k8s-app: seata-server
-  template:
-    metadata:
-      labels:
-        k8s-app: seata-server
-    spec:
-      containers:
-        - name: seata-server
-          image: docker.io/seataio/seata-server:latest
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: SEATA_PORT
-              value: "8091"
-            - name: STORE_MODE
-              value: file
-            - name: SEATA_CONFIG_NAME
-              value: file:/root/seata-config/registry
-          ports:
-            - name: http
-              containerPort: 8091
-              protocol: TCP
-          volumeMounts:
-            - name: seata-config
-              mountPath: /root/seata-config
-      volumes:
-        - name: seata-config
-          hostPath:
-            path: /root/workspace/seata/seata-config/file
-
-
- - - - - - - diff --git a/zh-cn/docs/ops/deploy-by-kubernetes.json b/zh-cn/docs/ops/deploy-by-kubernetes.json deleted file mode 100644 index 1b04ee76..00000000 --- a/zh-cn/docs/ops/deploy-by-kubernetes.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "filename": "deploy-by-kubernetes.md", - "__html": "

使用 Kubernetes 部署 Seata Server

\n

快速启动

\n

创建 seata-server.yaml

\n
apiVersion: v1\nkind: Service\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  type: NodePort\n  ports:\n    - port: 8091\n      nodePort: 30091\n      protocol: TCP\n      name: http\n  selector:\n    k8s-app: seata-server\n\n---\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: seata-server\n  template:\n    metadata:\n      labels:\n        k8s-app: seata-server\n    spec:\n      containers:\n        - name: seata-server\n          image: docker.io/seataio/seata-server:latest\n          imagePullPolicy: IfNotPresent\n          env:\n            - name: SEATA_PORT\n              value: \"8091\"\n            - name: STORE_MODE\n              value: file\n          ports:\n            - name: http\n              containerPort: 8091\n              protocol: TCP\n
\n
$ kubectl apply -f seata-server.yaml\n
\n

自定义配置

\n

环境变量

\n

支持的环境变量和 Docker 相同,可以参考 使用 Docker 部署 Seata Server

\n

使用自定义配置文件

\n

指定配置文件可以通过挂载的方式实现,如将/root/workspace/seata/seata-config/file 下的配置文件挂载到pod 中,挂载后需要通过指定 SEATA_CONFIG_NAME 指定配置文件位置,并且环境变量的值需要以file:开始, 如: file:/root/seata-config/registry

\n
    \n
  • Deployment
  • \n
\n
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: seata-server\n  namespace: default\n  labels:\n    k8s-app: seata-server\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: seata-server\n  template:\n    metadata:\n      labels:\n        k8s-app: seata-server\n    spec:\n      containers:\n        - name: seata-server\n          image: docker.io/seataio/seata-server:latest\n          imagePullPolicy: IfNotPresent\n          env:\n            - name: SEATA_PORT\n              value: \"8091\"\n            - name: STORE_MODE\n              value: file\n            - name: SEATA_CONFIG_NAME\n              value: file:/root/seata-config/registry\n          ports:\n            - name: http\n              containerPort: 8091\n              protocol: TCP\n          volumeMounts:\n            - name: seata-config\n              mountPath: /root/seata-config\n      volumes:\n        - name: seata-config\n          hostPath:\n            path: /root/workspace/seata/seata-config/file\n
\n", - "link": "/zh-cn/docs/ops/deploy-by-kubernetes.html", - "meta": { - "hidden": "true", - "title": "使用 Kubernetes 部署 Seata Server", - "keywords": "kubernetes,ops", - "description": "使用 Kubernetes 部署 Seata Server", - "author": "helloworlde", - "date": "2019-12-01" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/deploy-guide-beginner.html b/zh-cn/docs/ops/deploy-guide-beginner.html deleted file mode 100644 index 56c09408..00000000 --- a/zh-cn/docs/ops/deploy-guide-beginner.html +++ /dev/null @@ -1,314 +0,0 @@ - - - - - - - - - - Seata部署指南 - - - - -
文档

部署指南

-

Seata新手部署指南(1.0.0版本)

-

Seata分TC、TM和RM三个角色,TC(Server端)为单独服务端部署,TM和RM(Client端)由业务系统集成。

-

资源目录介绍

-
    -
  • script
  • -
-
-

seata根目录-->script,存放client所需脚本、配置,各个配置中心配置参数脚本(Server和Client并存)

-
-
    -
  • server
  • -
-
-

seata根目录-->seata-server,存放db模式所需建表脚本,server端配置参数在script。

-
-

注意事项

-
    -
  • seata-spring-boot-starter
  • -
-
-

1.0.0可用于替换seata-all,GlobalTransactionScanner自动初始化(依赖SpringUtils)
-若其他途径实现GlobalTransactionScanner初始化,请保证io.seata.spring.boot.autoconfigure.util.SpringUtils先初始化;
-starter默认开启数据源自动代理,用户若再手动配置DataSourceProxy将会导致异常

-
-
    -
  • spring-cloud-alibaba-seata
  • -
-
-

截止2019年12月7日,现在版本不能与seata-spring-boot-starter兼容,后续sca会提供新的seata集成版本。
-可以手动改造让SpringUtils先初始化,以实现兼容。

-
-

启动Server

-

Server端存储模式(store.mode)现有file、db两种(后续将引入raft),file模式无需改动,直接启动即可,下面专门讲下db启动步骤。
-注:file模式为单机模式,全局事务会话信息内存中读写并持久化本地文件root.data,性能较高;
-db模式为高可用模式,全局事务会话信息通过db共享,相应性能差些。

-

步骤一:启动包

- -

步骤二:建表

-

全局事务会话信息由3块内容构成,全局事务-->分支事务-->全局锁,对应表global_table、branch_table、lock_table,

- -

步骤三:修改store.mode

-

打开seata-server-->resources-->file.conf,修改store.mode="db";也可以在启动时加命令参数-m db指定。

-

步骤四:修改数据库连接

-

打开seata-server-->resources-->file.conf,修改store.db相关属性

-

步骤五:启动

-
    -
  • 源码启动: 执行Server.java的main方法
  • -
  • 命令启动: seata-server.sh -h 127.0.0.1 -p 8091 -m db -n 1 -DSEATA_ENV=test
  • -
-
    -h: 注册到注册中心的ip
-    -p: Server rpc 监听端口
-    -m: 全局事务会话信息存储模式,file、db,优先读取启动参数
-    -n: Server node,多个Server时,需区分各自节点,用于生成不同的transactionId范围,以免冲突
-    SEATA_ENV: 多环境配置参考 https://github.com/seata/seata/wiki/Multi-configuration-Isolation
-
- -

注: 堆内存建议分配4G,堆外内存1-2G

-

业务系统集成Client

-

步骤一:添加seata依赖

-
    -
  • 依赖seata-all
  • -
  • 依赖seata-spring-boot-starter,支持yml配置
  • -
  • 依赖spring-cloud-alibaba-seata,内部集成了seata,并实现了xid传递
  • -
-

步骤二:undo_log建表、配置参数

- -

步骤三:数据源代理

-
    -
  • 0.9.0版本开始seata支持自动代理数据源
  • -
-
    1.0.0: client.support.spring.datasource.autoproxy=true  
-    0.9.0: support.spring.datasource.autoproxy=true
-
-
    -
  • 手动配置可参考下方mybatis的例子
  • -
-
 @Bean
-    @ConfigurationProperties(prefix = "spring.datasource")
-    public DataSource druidDataSource() {
-        DruidDataSource druidDataSource = new DruidDataSource();
-        return druidDataSource;
-    }
-    @Primary
-    @Bean("dataSource")
-    public DataSourceProxy dataSource(DataSource druidDataSource) {
-        return new DataSourceProxy(druidDataSource);
-    }
-    @Bean
-    public MybatisSqlSessionFactoryBean mybatisSqlSessionFactoryBean(DataSourceProxy druidDataSource, ResourcePatternResolver resourcePatternResolver) throws IOException {
-        MybatisSqlSessionFactoryBean mybatisSqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
-        mybatisSqlSessionFactoryBean.setDataSource(druidDataSource);
-        mybatisSqlSessionFactoryBean.setMapperLocations(resourcePatternResolver.getResources("classpath:mapper/*.xml"));
-        return mybatisSqlSessionFactoryBean;
-    }
-
-

步骤四:初始化GlobalTransactionScanner

-
    -
  • 手动
  • -
-
       public GlobalTransactionScanner globalTransactionScanner() {
-           String applicationName = this.applicationContext.getEnvironment().getProperty("spring.application.name");
-           String txServiceGroup = this.seataProperties.getTxServiceGroup();
-           if (StringUtils.isEmpty(txServiceGroup)) {
-               txServiceGroup = applicationName + "-fescar-service-group";
-               this.seataProperties.setTxServiceGroup(txServiceGroup);
-           }
-   
-           return new GlobalTransactionScanner(applicationName, txServiceGroup);
-       }
-
-
    -
  • 自动,引入seata-spring-boot-starter、spring-cloud-alibaba-seata等jar
  • -
-

步骤五:实现xid跨服务传递

-
    -
  • 手动 -参考源码integration文件夹下的各种rpc实现 module(传递方式见下方示意)
  • -
  • 自动 -springCloud用户可以引入spring-cloud-alibaba-seata,内部已经实现xid传递
  • -
-
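For the manual option above, a minimal sketch of carrying the xid across a plain HTTP call is shown below. Only io.seata.core.context.RootContext (getXID/bind/unbind) is Seata API; the TX_XID header name and the HttpURLConnection plumbing are illustrative assumptions, not the integration modules' actual implementation.

import io.seata.core.context.RootContext;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class XidPropagationSketch {

    // Caller side: attach the current xid to the outgoing request (header name is an assumption).
    public static void callRemoteService(String serviceUrl) throws IOException {
        String xid = RootContext.getXID();
        HttpURLConnection conn = (HttpURLConnection) new URL(serviceUrl).openConnection();
        if (xid != null) {
            conn.setRequestProperty("TX_XID", xid);
        }
        conn.getResponseCode();   // send the request; response handling omitted
    }

    // Callee side: bind the received xid before running business logic, unbind afterwards.
    public static void onIncomingRequest(String xidHeader, Runnable businessLogic) {
        if (xidHeader != null) {
            RootContext.bind(xidHeader);
        }
        try {
            businessLogic.run();
        } finally {
            if (xidHeader != null) {
                RootContext.unbind();
            }
        }
    }
}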

事务分组专题简介

-

事务分组可以作为资源的逻辑隔离单位,去注册中心获得相应的TC服务列表。
-seata注册、配置中心分为两类,内置file、第三方注册(配置)中心如nacos等等,注册中心和配置中心之间没有约束,可各自使用不同类型。

-

file注册中心和配置中心

-

Server端

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "file"                ---------------> 使用file作为注册中心
-}
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "file"                ---------------> 使用file作为配置中心
-  file {
-    name = "file.conf"
-  }
-}
-
-
    -
  • file、db模式启动server,见文章上方节点:启动Server
  • -
-

Client端

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "file"                ---------------> 使用file作为注册中心
-}
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "file"                ---------------> 使用file作为配置中心
-  file {
-    name = "file.conf"         ---------------> 配置参数存储文件
-  }
-}
-spring.cloud.alibaba.seata.tx-service-group=my_test_tx_group ---------------> 事务分组配置
-file.conf: 
-    service {
-      vgroup_mapping.my_test_tx_group = "default"
-      default.grouplist = "127.0.0.1:8091"
-    }
-
-
    -
  • 读取配置
  • -
-
-

通过FileConfiguration本地加载file.conf的配置参数

-
-
    -
  • 获取事务分组
  • -
-
-

spring配置,springboot可配置在yml、properties中,服务启动时加载配置,对应的值"my_test_tx_group"即为一个事务分组名,若不配置,默认获取属性spring.application.name的值+"-fescar-service-group"

-
-
    -
  • 查找TC集群名
  • -
-
-

拿到事务分组名"my_test_tx_group"拼接成"service.vgroup_mapping.my_test_tx_group"查找TC集群名clusterName为"default"

-
-
    -
  • 查询TC服务
  • -
-
-

拼接"service."+clusterName+".grouplist"找到真实TC服务地址127.0.0.1:8091

-
-
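The two-step lookup described above can be summarized in a short conceptual sketch. The Config interface below is a stand-in for Seata's configuration facade (which, in file mode, reads file.conf); it is not the actual implementation.

public class TxGroupLookupSketch {

    /** Stand-in for Seata's configuration facade (file.conf in file mode). */
    interface Config {
        String get(String key);
    }

    static String findTcAddress(Config config, String txServiceGroup) {
        // Step 1: transaction group name -> TC cluster name, e.g. "default"
        String clusterName = config.get("service.vgroup_mapping." + txServiceGroup);
        // Step 2: cluster name -> real TC address list, e.g. "127.0.0.1:8091"
        return config.get("service." + clusterName + ".grouplist");
    }
}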

nacos注册中心和配置中心

-

Server端

-
registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "nacos"                ---------------> 使用nacos作为注册中心
-  nacos {
-    serverAddr = "localhost"    ---------------> nacos注册中心所在ip
-    namespace = ""              ---------------> nacos命名空间id,""为nacos保留public空间控件,用户勿配置namespace = "public"
-    cluster = "default"         ---------------> seata-server在nacos的集群名
-  }
-}
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "nacos"                ---------------> 使用nacos作为配置中心
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-    cluster = "default"
-  }
-}
-
-
-
    -
  • 脚本
  • -
-
-

script-->config-center下的3个nacos文件nacos-config.py、nacos-config.sh、nacos-config.txt
-txt为参数明细(包含Server和Client),sh为linux脚本,windows可下载git来操作,py为python脚本。

-
-
    -
  • 导入配置
  • -
-
-

用命令执行脚本导入seata配置参数至nacos,在nacos控制台查看配置确认是否成功

-
-
    -
  • 注册TC
  • -
-
-

启动seata-server注册至nacos,查看nacos控制台服务列表确认是否成功

-
-

Client端

-
spring.cloud.alibaba.seata.tx-service-group=my_test_tx_group ---------------> 事务分组配置
-registry {
-  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
-  type = "nacos"                ---------------> 从nacos获取TC服务
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-  }
-}
-config {
-  # file、nacos 、apollo、zk、consul、etcd3
-  type = "nacos"                ---------------> 使用nacos作为配置中心
-  nacos {
-    serverAddr = "localhost"
-    namespace = ""
-  }
-}
-
-
    -
  • 读取配置
  • -
-
-

通过NacosConfiguration远程读取seata配置参数

-
-
    -
  • 获取事务分组
  • -
-
-

springboot可配置在yml、properties中,服务启动时加载配置,对应的值"my_test_tx_group"即为一个事务分组名,若不配置,默认获取属性spring.application.name的值+"-fescar-service-group"

-
-
    -
  • 查找TC集群名
  • -
-
-

拿到事务分组名"my_test_tx_group"拼接成"service.vgroup_mapping.my_test_tx_group"从配置中心查找到TC集群名clusterName为"default"

-
-
    -
  • 查找TC服务
  • -
-
-

根据serverAddr和namespace以及clusterName在注册中心找到真实TC服务列表

-
-

注:serverAddr和namespace与Server端一致,clusterName与Server端cluster一致

-
- - - - - - - diff --git a/zh-cn/docs/ops/deploy-guide-beginner.json b/zh-cn/docs/ops/deploy-guide-beginner.json deleted file mode 100644 index fe990cd9..00000000 --- a/zh-cn/docs/ops/deploy-guide-beginner.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "deploy-guide-beginner.md", - "__html": "

部署指南

\n

Seata新手部署指南(1.0.0版本)

\n

Seata分TC、TM和RM三个角色,TC(Server端)为单独服务端部署,TM和RM(Client端)由业务系统集成。

\n

资源目录介绍

\n
    \n
  • script
  • \n
\n
\n

seata根目录-->script,存放client所需脚本、配置,各个配置中心配置参数脚本(Server和Client并存)

\n
\n
    \n
  • server
  • \n
\n
\n

seata根目录-->seata-server,存放db模式所需建表脚本,server端配置参数在script。

\n
\n

注意事项

\n
    \n
  • seata-spring-boot-starter
  • \n
\n
\n

1.0.0可用于替换seata-all,GlobalTransactionScanner自动初始化(依赖SpringUtils)
\n若其他途径实现GlobalTransactionScanner初始化,请保证io.seata.spring.boot.autoconfigure.util.SpringUtils先初始化;
\nstarter默认开启数据源自动代理,用户若再手动配置DataSourceProxy将会导致异常

\n
\n
    \n
  • spring-cloud-alibaba-seata
  • \n
\n
\n

截止20191207日,现在版本不能与seata-spring-boot-starter兼容,后续sca会提供新的seata集成版本。
\n可以手动改造让SpringUtils先初始化,以实现兼容。

\n
\n

启动Server

\n

Server端存储模式(store.mode)现有file、db两种(后续将引入raft),file模式无需改动,直接启动即可,下面专门讲下db启动步骤。
\n注:file模式为单机模式,全局事务会话信息内存中读写并持久化本地文件root.data,性能较高;
\ndb模式为高可用模式,全局事务会话信息通过db共享,相应性能差些。

\n

步骤一:启动包

\n\n

步骤二:建表

\n

全局事务会话信息由3块内容构成,全局事务-->分支事务-->全局锁,对应表global_table、branch_table、lock_table,

\n\n

步骤三:修改store.mode

\n

打开seata-server-->resources-->file.conf,修改store.mode="db";也可以在启动时加命令参数-m db指定。

\n

步骤四:修改数据库连接

\n

打开seata-server-->resources-->file.conf,修改store.db相关属性

\n

步骤五:启动

\n
    \n
  • 源码启动: 执行Server.java的main方法
  • \n
  • 命令启动: seata-server.sh -h 127.0.0.1 -p 8091 -m db -n 1 -DSEATA_ENV=test
  • \n
\n
    -h: 注册到注册中心的ip\n    -p: Server rpc 监听端口\n    -m: 全局事务会话信息存储模式,file、db,优先读取启动参数\n    -n: Server node,多个Server时,需区分各自节点,用于生成不同的transactionId范围,以免冲突\n    SEATA_ENV: 多环境配置参考 https://github.com/seata/seata/wiki/Multi-configuration-Isolation\n
\n\n

注: 堆内存建议分配4G,堆外内存1-2G

\n

业务系统集成Client

\n

步骤一:添加seata依赖

\n
    \n
  • 依赖seata-all
  • \n
  • 依赖seata-spring-boot-starter,支持yml配置
  • \n
  • 依赖spring-cloud-alibaba-seata,内部集成了seata,并实现了xid传递
  • \n
\n

步骤二:undo_log建表、配置参数

\n\n

步骤三:数据源代理

\n
    \n
  • 0.9.0版本开始seata支持自动代理数据源
  • \n
\n
    1.0.0: client.support.spring.datasource.autoproxy=true  \n    0.9.0: support.spring.datasource.autoproxy=true\n
\n
    \n
  • 手动配置可参考下方mybatis的例子
  • \n
\n
 @Bean\n    @ConfigurationProperties(prefix = "spring.datasource")\n    public DataSource druidDataSource() {\n        DruidDataSource druidDataSource = new DruidDataSource();\n        return druidDataSource;\n    }\n    @Primary\n    @Bean("dataSource")\n    public DataSourceProxy dataSource(DataSource druidDataSource) {\n        return new DataSourceProxy(druidDataSource);\n    }\n    @Bean\n    public MybatisSqlSessionFactoryBean mybatisSqlSessionFactoryBean(DataSourceProxy druidDataSource, ResourcePatternResolver resourcePatternResolver) throws IOException {\n        MybatisSqlSessionFactoryBean mybatisSqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();\n        mybatisSqlSessionFactoryBean.setDataSource(druidDataSource);\n        mybatisSqlSessionFactoryBean.setMapperLocations(resourcePatternResolver.getResources("classpath:mapper/*.xml"));\n        return mybatisSqlSessionFactoryBean;\n    }\n
\n

步骤四:初始化GlobalTransactionScanner

\n
    \n
  • 手动
  • \n
\n
       public GlobalTransactionScanner globalTransactionScanner() {\n           String applicationName = this.applicationContext.getEnvironment().getProperty("spring.application.name");\n           String txServiceGroup = this.seataProperties.getTxServiceGroup();\n           if (StringUtils.isEmpty(txServiceGroup)) {\n               txServiceGroup = applicationName + "-fescar-service-group";\n               this.seataProperties.setTxServiceGroup(txServiceGroup);\n           }\n   \n           return new GlobalTransactionScanner(applicationName, txServiceGroup);\n       }\n
\n
    \n
  • 自动,引入seata-spring-boot-starter、spring-cloud-alibaba-seata等jar
  • \n
\n

步骤五:实现xid跨服务传递

\n
    \n
  • 手动\n参考源码integration文件夹下的各种rpc实现 module
  • \n
  • 自动\nspringCloud用户可以引入spring-cloud-alibaba-seata,内部已经实现xid传递
  • \n
\n

事务分组专题简介

\n

事务分组可以作为资源的逻辑隔离单位,去注册中心获得相应的TC服务列表。
\nseata注册、配置中心分为两类,内置file、第三方注册(配置)中心如nacos等等,注册中心和配置中心之间没有约束,可各自使用不同类型。

\n

file注册中心和配置中心

\n

Server端

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "file"                ---------------> 使用file作为注册中心\n}\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "file"                ---------------> 使用file作为配置中心\n  file {\n    name = "file.conf"\n  }\n}\n
\n
    \n
  • file、db模式启动server,见文章上方节点:启动Server
  • \n
\n

Client端

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "file"                ---------------> 使用file作为注册中心\n}\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "file"                ---------------> 使用file作为配置中心\n  file {\n    name = "file.conf"         ---------------> 配置参数存储文件\n  }\n}\nspring.cloud.alibaba.seata.tx-service-group=my_test_tx_group ---------------> 事务分组配置\nfile.conf: \n    service {\n      vgroup_mapping.my_test_tx_group = "default"\n      default.grouplist = "127.0.0.1:8091"\n    }\n
\n
    \n
  • 读取配置
  • \n
\n
\n

通过FileConfiguration本地加载file.conf的配置参数

\n
\n
    \n
  • 获取事务分组
  • \n
\n
\n

spring配置,springboot可配置在yml、properties中,服务启动时加载配置,对应的值"my_test_tx_group"即为一个事务分组名,若不配置,默认获取属性spring.application.name的值+"-fescar-service-group"

\n
\n
    \n
  • 查找TC集群名
  • \n
\n
\n

拿到事务分组名"my_test_tx_group"拼接成"service.vgroup_mapping.my_test_tx_group"查找TC集群名clusterName为"default"

\n
\n
    \n
  • 查询TC服务
  • \n
\n
\n

拼接"service."+clusterName+".grouplist"找到真实TC服务地址127.0.0.1:8091

\n
\n

nacos注册中心和配置中心

\n

Server端

\n
registry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "nacos"                ---------------> 使用nacos作为注册中心\n  nacos {\n    serverAddr = "localhost"    ---------------> nacos注册中心所在ip\n    namespace = ""              ---------------> nacos命名空间id,""为nacos保留public空间控件,用户勿配置namespace = "public"\n    cluster = "default"         ---------------> seata-server在nacos的集群名\n  }\n}\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "nacos"                ---------------> 使用nacos作为配置中心\n  nacos {\n    serverAddr = "localhost"\n    namespace = ""\n    cluster = "default"\n  }\n}\n\n
\n
    \n
  • 脚本
  • \n
\n
\n

script-->config-center下的3个nacos文件nacos-config.pynacos-config.sh、nacos-config.txt
\ntxt为参数明细(包含S\n.erver和Client),sh为linux脚本,windows可下载git来操作,py为python脚本。

\n
\n
    \n
  • 导入配置
  • \n
\n
\n

用命令执行脚本导入seata配置参数至nacos,在nacos控制台查看配置确认是否成功

\n
\n
    \n
  • 注册TC
  • \n
\n
\n

启动seata-server注册至nacos,查看nacos控制台服务列表确认是否成功

\n
\n

Client端

\n
spring.cloud.alibaba.seata.tx-service-group=my_test_tx_group ---------------> 事务分组配置\nregistry {\n  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa\n  type = "nacos"                ---------------> 从nacos获取TC服务\n  nacos {\n    serverAddr = "localhost"\n    namespace = ""\n  }\n}\nconfig {\n  # file、nacos 、apollo、zk、consul、etcd3\n  type = "nacos"                ---------------> 使用nacos作为配置中心\n  nacos {\n    serverAddr = "localhost"\n    namespace = ""\n  }\n}\n
\n
    \n
  • 读取配置
  • \n
\n
\n

通过NacosConfiguration远程读取seata配置参数

\n
\n
    \n
  • 获取事务分组
  • \n
\n
\n

springboot可配置在yml、properties中,服务启动时加载配置,对应的值"my_test_tx_group"即为一个事务分组名,若不配置,默认获取属性spring.application.name的值+"-fescar-service-group"

\n
\n
    \n
  • 查找TC集群名
  • \n
\n
\n

拿到事务分组名"my_test_tx_group"拼接成"service.vgroup_mapping.my_test_tx_group"从配置中心查找到TC集群名clusterName为"default"

\n
\n
    \n
  • 查找TC服务
  • \n
\n
\n

根据serverAddr和namespace以及clusterName在注册中心找到真实TC服务列表

\n
\n

注:serverAddr和namespace与Server端一致,clusterName与Server端cluster一致

\n", - "link": "/zh-cn/docs/ops/deploy-guide-beginner.html", - "meta": { - "title": "Seata部署指南", - "keywords": "Seata", - "description": "Seata分TC、TM和RM三个角色,TC(Server端)为单独服务端部署,TM和RM(Client端)由业务系统集成。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/deploy-server.html b/zh-cn/docs/ops/deploy-server.html deleted file mode 100644 index 97ec2dd0..00000000 --- a/zh-cn/docs/ops/deploy-server.html +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - 部署 Server - - - - -
文档

部署 Server

-

Server支持多种方式部署:直接部署,使用 Docker, 使用 Docker-Compose, 使用 Kubernetes, 使用 Helm.

-

直接部署

-
    -
  1. -

    RELEASE页面下载相应版本并解压

    -
  2. -
  3. -

    直接启动

    -
  4. -
-

在 Linux/Mac 下

-
$ sh ./bin/seata-server.sh
-
-

在 Windows 下

-
bin\seata-server.bat
-
-

支持的启动参数

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
参数 | 全写 | 作用 | 备注
-h | --host | 指定在注册中心注册的 IP | 不指定时获取当前的 IP,外部访问部署在云环境和容器中的 server 建议指定
-p | --port | 指定 server 启动的端口 | 默认为 8091
-m | --storeMode | 事务日志存储方式 | 支持file、db,默认为 file
-n | --serverNode | 用于指定seata-server节点ID | 如 1,2,3..., 默认为 1
-e | --seataEnv | 指定 seata-server 运行环境 | dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置
-

如:

-
$ sh ./bin/seata-server.sh -p 8091 -h 127.0.0.1 -m file
-
-

容器部署

-

容器部署当前支持三种方式:

- -
- - - - - - - diff --git a/zh-cn/docs/ops/deploy-server.json b/zh-cn/docs/ops/deploy-server.json deleted file mode 100644 index dacfa7e8..00000000 --- a/zh-cn/docs/ops/deploy-server.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "deploy-server.md", - "__html": "

部署 Server

\n

Server支持多种方式部署:直接部署,使用 Docker, 使用 Docker-Compose, 使用 Kubernetes, 使用 Helm.

\n

直接部署

\n
    \n
  1. \n

    RELEASE页面下载相应版本并解压

    \n
  2. \n
  3. \n

    直接启动

    \n
  4. \n
\n

在 Linux/Mac 下

\n
$ sh ./bin/seata-server.sh\n
\n

在 Windows 下

\n
bin\\seata-server.bat\n
\n

支持的启动参数

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
参数全写作用备注
-h--host指定在注册中心注册的 IP不指定时获取当前的 IP,外部访问部署在云环境和容器中的 server 建议指定
-p--port指定 server 启动的端口默认为 8091
-m--storeMode事务日志存储方式支持filedb,默认为 file
-n--serverNode用于指定seata-server节点ID,如 1,2,3..., 默认为 1
-e--seataEnv指定 seata-server 运行环境dev, test 等, 服务启动时会使用 registry-dev.conf 这样的配置
\n

如:

\n
$ sh ./bin/seata-server.sh -p 8091 -h 127.0.0.1 -m file\n
\n

容器部署

\n

容器部署当前支持三种方式:

\n\n", - "link": "/zh-cn/docs/ops/deploy-server.html", - "meta": { - "title": "部署 Server", - "keywords": "Seata", - "description": "Server支持多种方式部署:直接部署,使用 Docker, 使用 Docker-Compose, 使用 Kubernetes, 使用 Helm。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/ops/operation.html b/zh-cn/docs/ops/operation.html deleted file mode 100644 index 4bb4fd88..00000000 --- a/zh-cn/docs/ops/operation.html +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - 运维指南 - - - - -
文档

运维指南

-

Metrics配置指南

-

Seata支持在TC、TM和RM三个角色开启Metrics数据采集并输出到Prometheus监控系统中。

-

在TC中配置开启Metrics

-

步骤一:在Seata Server中增加Metrics的依赖并重新编译Server

-

打开Seata Server源代码的pom,添加Metrics依赖:

-
<dependency>
-	<groupId>${project.groupId}</groupId>
-	<artifactId>seata-metrics-prometheus</artifactId>
-</dependency>
-
-

重新编译Server,启动,输入http://tc-server-ip:9898/metrics,即可获得最新的Metrics数据,例如:

-
# HELP seata seata
-# TYPE seata untyped
-seata_transaction{meter="counter",role="tc",status="committed",} 1358.0 1551946035372
-seata_transaction{meter="counter",role="tc",status="active",} 0.0 1551946035372
-seata_transaction{meter="summary",role="tc",statistic="count",status="committed",} 6.0 1551946035372
-seata_transaction{meter="summary",role="tc",statistic="total",status="committed",} 6.0 1551946035372
-seata_transaction{meter="summary",role="tc",statistic="tps",status="committed",} 1.6163793103448276 1551946035372
-seata_transaction{meter="timer",role="tc",statistic="count",status="committed",} 6.0 1551946035372
-seata_transaction{meter="timer",role="tc",statistic="total",status="committed",} 910.0 1551946035372
-seata_transaction{meter="timer",role="tc",statistic="max",status="committed",} 164.0 1551946035372
-seata_transaction{meter="timer",role="tc",statistic="average",status="committed",} 151.66666666666666 1551946035372
-
-
-

提示:

-
    -
  1. 目前我们使用的Prometheus数据发布端口固定为9898,未来会将其修改为可配置项,请确保此端口不会被占用;
  2. -
  3. 如果某些Transaction状态没有发生,例如rollback,那么对应的Metrics指标也不会存在(输出)。
  4. -
-
-

步骤二:修改Prometheus配置文件并启动Prometheus

-

打开Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata TC的Metrics数据:

-
scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['localhost:9090']
-
-  - job_name: 'seata'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['tc-server-ip:9898']
-
-

步骤三:在Prometheus UI或Grafana中查看Seata TC的Metrics

-

在浏览器中打开Prometheus UI:http://localhost:9090/graph,选择seata_transaction,点击查询,即可获取到最新数据:

-

tc-prometheus

-

推荐在Prometheus中结合配置Grafana获得更好的查询效果:

-

tc-grafana

-
-

提示:此配置是将Prometheus作为Grafana的数据源,因此数据完全相同,只是使用Grafana显示效果更佳。

-
-
- - - - - - - diff --git a/zh-cn/docs/ops/operation.json b/zh-cn/docs/ops/operation.json deleted file mode 100644 index 887559a5..00000000 --- a/zh-cn/docs/ops/operation.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "operation.md", - "__html": "

运维指南

\n

Metrics配置指南

\n

Seata支持在TC、TM和RM三个角色开启Metrics数据采集并输出到Prometheus监控系统中。

\n

在TC中配置开启Metrics

\n

步骤一:在Seata Server中增加Metrics的依赖并重新编译Server

\n

打开Seata Server源代码的pom,添加Metrics依赖:

\n
<dependency>\n\t<groupId>${project.groupId}</groupId>\n\t<artifactId>seata-metrics-prometheus</artifactId>\n</dependency>\n
\n

重新编译Server,启动,输入http://tc-server-ip:9898/metrics,即可获得最新的Metrics数据,例如:

\n
# HELP seata seata\n# TYPE seata untyped\nseata_transaction{meter="counter",role="tc",status="committed",} 1358.0 1551946035372\nseata_transaction{meter="counter",role="tc",status="active",} 0.0 1551946035372\nseata_transaction{meter="summary",role="tc",statistic="count",status="committed",} 6.0 1551946035372\nseata_transaction{meter="summary",role="tc",statistic="total",status="committed",} 6.0 1551946035372\nseata_transaction{meter="summary",role="tc",statistic="tps",status="committed",} 1.6163793103448276 1551946035372\nseata_transaction{meter="timer",role="tc",statistic="count",status="committed",} 6.0 1551946035372\nseata_transaction{meter="timer",role="tc",statistic="total",status="committed",} 910.0 1551946035372\nseata_transaction{meter="timer",role="tc",statistic="max",status="committed",} 164.0 1551946035372\nseata_transaction{meter="timer",role="tc",statistic="average",status="committed",} 151.66666666666666 1551946035372\n
\n
\n

提示:

\n
    \n
  1. 目前我们使用的Prometheus数据发布端口固定为9898,未来会将其修改为可配置项,请确保此端口不会被占用;
  2. \n
  3. 如果某些Transaction状态没有发生,例如rollback,那么对应的Metrics指标也不会存在(输出)。
  4. \n
\n
\n

步骤二:修改Prometheus配置文件并启动Prometheus

\n

打开Prometheus的配置文件prometheus.yml,在scrape_configs中增加一项抓取Seata TC的Metrics数据:

\n
scrape_configs:\n  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.\n  - job_name: 'prometheus'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['localhost:9090']\n\n  - job_name: 'seata'\n\n    # metrics_path defaults to '/metrics'\n    # scheme defaults to 'http'.\n\n    static_configs:\n    - targets: ['tc-server-ip:9898']\n
\n

步骤三:在Prometheus UI或Grafana中查看Seata TC的Metrics

\n

在浏览器中打开Prometheus UIhttp://localhost:9090/graph,选择seata_transaction,点击查询,即可获取到最新数据:

\n

\"tc-prometheus\"

\n

推荐在Prometheus中结合配置Grafana获得更好的查询效果:

\n

\"tc-grafana\"

\n
\n

提示:此配置是将Prometheus作为Grafana的数据源,因此数据完全相同,只是使用Grafana显示效果更佳。

\n
\n", - "link": "/zh-cn/docs/ops/operation.html", - "meta": { - "title": "运维指南", - "keywords": "Seata", - "description": "Seata支持在TC、TM和RM三个角色开启Metrics数据采集并输出到Prometheus监控系统中。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/overview/faq.html b/zh-cn/docs/overview/faq.html deleted file mode 100644 index d9888508..00000000 --- a/zh-cn/docs/overview/faq.html +++ /dev/null @@ -1,141 +0,0 @@ - - - - - - - - - - Seata常见问题 - - - - -
文档

常见问题

-

1.Seata 目前可以用于生产环境吗?

-

2.Seata 目前支持高可用吗?

-

3.undo_log表log_status=1的记录是做什么用的?

-

4.怎么使用Seata框架,来保证事务的隔离性?

-

5.脏数据回滚失败如何处理?

-

6.为什么分支事务注册时, 全局事务状态不是begin?

-

7.Nacos 作为 Seata 配置中心时,项目启动报错找不到服务。如何排查,如何处理?

-

8.Eureka做注册中心,TC高可用时,如何在TC端覆盖Eureka属性?

-

9.java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

-

10.为什么mybatis没有返回自增ID?

-
-

Q: 1.Seata 目前可以用于生产环境吗?

-

A: -0.4.2版本之后就可以上生产环境,欢迎已经在使用的企业参与此issue:who's using Seata

-
-

Q: 2.Seata 目前支持高可用吗?

-

A: -0.6版本开始支持,tc使用db模式共享全局事务会话信息,注册中心使用非file的seata支持的第三方注册中心

-
-

Q: 3.undo_log表log_status=1的记录是做什么用的?

-

A:

-
    -
  • 场景 : 分支事务a注册TC后,a的本地事务提交前发生了全局事务回滚
  • -
  • 后果 : 全局事务回滚成功,a资源被占用掉,产生了资源悬挂问题
  • -
  • 防悬挂措施: a回滚时发现回滚undo还未插入,则插入一条log_status=1的undo记录,a本地事务(业务写操作sql和对应undo为一个本地事务)提交时会因为undo表主键冲突而提交失败。
  • -
-
-

Q: 4.怎么使用Seata框架,来保证事务的隔离性?

-

A: -因seata一阶段本地事务已提交,为防止其他事务脏读脏写需要加强隔离。

-
    -
  1. 脏读 select语句加for update,代理方法增加@GlobalLock或@GlobalTransactional(示例见下方代码)
  2. -
  3. 脏写 必须使用@GlobalTransactional
    -注:如果你查询的业务的接口没有GlobalTransactional 包裹,也就是这个方法上压根没有分布式事务的需求,这时你可以在方法上标注@GlobalLock 注解,并且在查询语句上加 for update。 -如果你查询的接口在事务链路上外层有GlobalTransactional注解,那么你查询的语句只要加for update就行。设计这个注解的原因是在没有这个注解之前,需要查询分布式事务读已提交的数据,但业务本身不需要分布式事务。 -若使用GlobalTransactional注解就会增加一些没用的额外的rpc开销比如begin 返回xid,提交事务等。GlobalLock简化了rpc过程,使其做到更高的性能。
  4. -
-
-
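A minimal sketch of case 1, assuming a Spring service with a MyBatis mapper; the product table, mapper and entity are illustrative, only the @GlobalLock annotation and the FOR UPDATE query come from the answer above.

import io.seata.spring.annotation.GlobalLock;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import org.springframework.stereotype.Service;

@Service
public class ProductQueryService {

    private final ProductMapper productMapper;

    public ProductQueryService(ProductMapper productMapper) {
        this.productMapper = productMapper;
    }

    // No global transaction is opened here: @GlobalLock only checks Seata's global lock,
    // and the FOR UPDATE query retries until no other global transaction holds the row,
    // so the method reads globally committed data without the begin/commit RPC overhead.
    @GlobalLock
    public Product readCommitted(long id) {
        return productMapper.selectForUpdate(id);
    }

    public interface ProductMapper {
        @Select("select id, name, since from product where id = #{id} for update")
        Product selectForUpdate(@Param("id") long id);
    }

    public static class Product {
        public Long id;
        public String name;
        public String since;
    }
}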

Q: 5.脏数据回滚失败如何处理?

-

A:

-
    -
  1. 脏数据需手动处理,根据日志提示修正数据或者将对应undo删除(可自定义实现FailureHandler做邮件通知或其他)
  2. -
  3. 关闭回滚时undo镜像校验,不推荐该方案。
  4. -
-
注:建议事前做好隔离保证无脏数据
-
-
-

Q: 6.为什么分支事务注册时, 全局事务状态不是begin?

-

A:

-
    -
  • 异常:Could not register branch into global session xid = status = Rollbacked(还有Rollbacking、AsyncCommitting等等二阶段状态) while expecting Begin
  • -
  • 描述:分支事务注册时,全局事务状态需是一阶段状态begin,非begin不允许注册。属于seata框架层面正常的处理,用户可以从自身业务层面解决。
  • -
  • 出现场景(可继续补充)
  • -
-
  1. 分支事务是异步,全局事务无法感知它的执行进度,全局事务已进入二阶段,该异步分支才来注册
-  2. 服务a rpc 服务b超时(dubbo、feign等默认1秒超时),a上抛异常给tm,tm通知tc回滚,但是b还是收到了请求(网络延迟或rpc框架重试),然后去tc注册时发现全局事务已在回滚
-  3. tc感知全局事务超时(@GlobalTransactional(timeoutMills = 默认60秒)),主动变更状态并通知各分支事务回滚,此时有新的分支事务来注册
-
-
-

Q: 7.Nacos 作为 Seata 配置中心时,项目启动报错找不到服务。如何排查,如何处理?

-

A: -异常:io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server.

-
    -
  1. 查看nacos配置列表,seata配置是否已经导入成功
  2. -
  3. 查看nacos服务列表,serverAddr是否已经注册成功
  4. -
  5. 检查client端的registry.conf里面的namespace,registry.nacos.namespace和config.nacos.namespace填入nacos的命名空间ID,默认"",server端和client端对应,namespace -为public是nacos的一个保留空间,如果您需要创建自己的namespace,最好不要和public重名,以一个实际业务场景有具体语义的名字来命名
  6. -
  7. nacos上服务列表,serverAddr地址对应ip地址应为seata启动指定ip地址,如:sh seata-server.sh -p 8091 -h 122.51.204.197 -m file
  8. -
  9. 查看seata/conf/nacos-config.txt 事务分组service.vgroup_mapping.trade_group=default配置与项目分组配置名称是否一致
  10. -
  11. telnet ip 端口 查看端口是否开放,以及防火墙状态
  12. -
-
注:1. 0.8.0版本启动指定ip问题,出现异常Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot assign requested address,请升级到0.8.1以上版本  
-    2.项目使用jdk13,启动出现
-
-
Error: Could not create the Java Virtual Machine.
-Error: A fatal exception has occurred. Program will exit.
-
-
    如环境为sh,替换脚本中最后一段:
-
-
        exec "$JAVACMD" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="$BASEDIR"/logs/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \
-      -classpath "$CLASSPATH" \
-          -Dapp.name="seata-server" \
-          -Dapp.pid="$$" \
-          -Dapp.repo="$REPO" \
-          -Dapp.home="$BASEDIR" \
-          -Dbasedir="$BASEDIR" \
-          io.seata.server.Server \
-          "$@"
-
-
-

Q: 8.Eureka做注册中心,TC高可用时,如何在TC端覆盖Eureka属性?

-

A: -在seata\conf目录下新增eureka-client.properties文件,添加要覆盖的Eureka属性即可。
-例如,要覆盖eureka.instance.lease-renewal-interval-in-seconds和eureka.instance.lease-expiration-duration-in-seconds,添加如下内容:

-
eureka.lease.renewalInterval=1
-eureka.lease.duration=2
-
-

属性前缀为eureka,其后的属性名可以参考类com.netflix.appinfo.PropertyBasedInstanceConfigConstants,也可研究seata源码中的discovery模块的seata-discovery-eureka工程

-
-

Q: 9.发生下面异常是啥原因? java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

-

A: -undolog序列化配置为jackson时,jackson版本需要为2.9.9+

-
-

Q: 10.为什么mybatis没有返回自增ID?

-

A: -需要修改mybatis的配置: 在@Options(useGeneratedKeys = true, keyProperty = "id")或者在xml中指定useGeneratedKeys 和 keyProperty属性

-
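For reference, an annotation-based mapper sketch (the product table and entity are illustrative):

import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Options;

@Mapper
public interface ProductMapper {

    // useGeneratedKeys + keyProperty tell MyBatis to write the generated key back into product.id
    @Insert("insert into product(name, since) values(#{name}, #{since})")
    @Options(useGeneratedKeys = true, keyProperty = "id")
    int insert(Product product);

    class Product {
        public Long id;
        public String name;
        public String since;
    }
}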
-
- - - - - - - diff --git a/zh-cn/docs/overview/faq.json b/zh-cn/docs/overview/faq.json deleted file mode 100644 index 0c08f52b..00000000 --- a/zh-cn/docs/overview/faq.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "faq.md", - "__html": "

常见问题

\n

1.Seata 目前可以用于生产环境吗?

\n

2.Seata 目前支持高可用吗?

\n

3.undo_log表log_status=1的记录是做什么用的?

\n

4.怎么使用Seata框架,来保证事务的隔离性?

\n

5.脏数据回滚失败如何处理?

\n

6.为什么分支事务注册时, 全局事务状态不是begin?

\n

7.Nacos 作为 Seata 配置中心时,项目启动报错找不到服务。如何排查,如何处理?

\n

8.Eureka做注册中心,TC高可用时,如何在TC端覆盖Eureka属性?

\n

9.java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

\n

10.为什么mybatis没有返回自增ID?

\n
\n

Q: 1.Seata 目前可以用于生产环境吗?

\n

A:\n0.4.2版本之后就可以上生产环境,欢迎已经在使用的企业参与此issue:who's using Seata

\n
\n

Q: 2.Seata 目前支持高可用吗?

\n

A:\n0.6版本开始支持,tc使用db模式共享全局事务会话信息,注册中心使用非file的seata支持的第三方注册中心

\n
\n

Q: 3.undo_log表log_status=1的记录是做什么用的?

\n

A:

\n
    \n
  • 场景 : 分支事务a注册TC后,a的本地事务提交前发生了全局事务回滚
  • \n
  • 后果 : 全局事务回滚成功,a资源被占用掉,产生了资源悬挂问题
  • \n
  • 防悬挂措施: a回滚时发现回滚undo还未插入,则插入一条log_status=1的undo记录,a本地事务(业务写操作sql和对应undo为一个本地事务)提交时会因为undo表主键冲突而提交失败。
  • \n
\n
\n

Q: 4.怎么使用Seata框架,来保证事务的隔离性?

\n

A:\n因seata一阶段本地事务已提交,为防止其他事务脏读脏写需要加强隔离。

\n
    \n
  1. 脏读 select语句加for update,代理方法增加@GlobalLock或@GlobalTransaction
  2. \n
  3. 脏写 必须使用@GlobalTransaction
    \n注:如果你查询的业务的接口没有GlobalTransactional 包裹,也就是这个方法上压根没有分布式事务的需求,这时你可以在方法上标注@GlobalLock 注解,并且在查询语句上加 for update。\n如果你查询的接口在事务链路上外层有GlobalTransactional注解,那么你查询的语句只要加for update就行。设计这个注解的原因是在没有这个注解之前,需要查询分布式事务读已提交的数据,但业务本身不需要分布式事务。\n若使用GlobalTransactional注解就会增加一些没用的额外的rpc开销比如begin 返回xid,提交事务等。GlobalLock简化了rpc过程,使其做到更高的性能。
  4. \n
\n
\n

Q: 5.脏数据回滚失败如何处理?

\n

A:

\n
    \n
  1. 脏数据需手动处理,根据日志提示修正数据或者将对应undo删除(可自定义实现FailureHandler做邮件通知或其他)
  2. \n
  3. 关闭回滚时undo镜像校验,不推荐该方案。
  4. \n
\n
注:建议事前做好隔离保证无脏数据\n
\n
\n

Q: 6.为什么分支事务注册时, 全局事务状态不是begin?

\n

A:

\n
    \n
  • 异常:Could not register branch into global session xid = status = Rollbacked(还有Rollbacking、AsyncCommitting等等二阶段状态) while expecting Begin
  • \n
  • 描述:分支事务注册时,全局事务状态需是一阶段状态begin,非begin不允许注册。属于seata框架层面正常的处理,用户可以从自身业务层面解决。
  • \n
  • 出现场景(可继续补充)
  • \n
\n
  1. 分支事务是异步,全局事务无法感知它的执行进度,全局事务已进入二阶段,该异步分支才来注册\n  2. 服务a rpc 服务b超时(dubbo、feign等默认1秒超时),a上抛异常给tm,tm通知tc回滚,但是b还是收到了请求(网络延迟或rpc框架重试),然后去tc注册时发现全局事务已在回滚\n  3. tc感知全局事务超时(@GlobalTransactional(timeoutMills = 默认60秒)),主动变更状态并通知各分支事务回滚,此时有新的分支事务来注册\n
\n
\n

Q: 7.Nacos 作为 Seata 配置中心时,项目启动报错找不到服务。如何排查,如何处理?

\n

A:\n异常:io.seata.common.exception.FrameworkException: can not register RM,err:can not connect to services-server.

\n
    \n
  1. 查看nacos配置列表,seata配置是否已经导入成功
  2. \n
  3. 查看nacos服务列表,serverAddr是否已经注册成功
  4. \n
  5. 检查client端的registry.conf里面的namespace,registry.nacos.namespace和config.nacos.namespace填入nacos的命名空间ID,默认"",server端和client端对应,namespace\n为public是nacos的一个保留控件,如果您需要创建自己的namespace,最好不要和public重名,以一个实际业务场景有具体语义的名字来命名
  6. \n
  7. nacos上服务列表,serverAddr地址对应ip地址应为seata启动指定ip地址,如:sh seata-server.sh -p 8091 -h 122.51.204.197 -m file
  8. \n
  9. 查看seata/conf/nacos-config.txt 事务分组service.vgroup_mapping.trade_group=default配置与项目分组配置名称是否一致
  10. \n
  11. telnet ip 端口 查看端口是都开放,以及防火墙状态
  12. \n
\n
注:1.080版本启动指定ip问题,出现异常Exception in thread "main" java.lang.RuntimeException: java.net.BindException: Cannot assign request address,请升级到081以上版本  \n    2.项目使用jdk13,启动出现\n
\n
Error: Could not create the Java Virtual Machine.\nError: A fatal exception has occurred. Program will exit.\n
\n
    如环境为sh,替换脚本中最后一段:\n
\n
        exec \"$JAVACMD\" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\"$BASEDIR\"/logs/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+\nUseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -verbose:gc -Dio.netty.leakDetectionLevel=advanced \\\n      -classpath \"$CLASSPATH\" \\\n          -Dapp.name=\"seata-server\" \\\n          -Dapp.pid=\"$$\" \\\n          -Dapp.repo=\"$REPO\" \\\n          -Dapp.home=\"$BASEDIR\" \\\n          -Dbasedir=\"$BASEDIR\" \\\n          io.seata.server.Server \\\n          \"$@\"\n
\n
\n

Q: 8.Eureka做注册中心,TC高可用时,如何在TC端覆盖Eureka属性?

\n

A:\n在seata\\conf目录下新增eureka-client.properties文件,添加要覆盖的Eureka属性即可。
\n例如,要覆盖eureka.instance.lease-renewal-interval-in-seconds和eureka.instance.lease-expiration-duration-in-seconds,添加如下内容:

\n
eureka.lease.renewalInterval=1\neureka.lease.duration=2\n
\n

属性前缀为eureka,其后的属性名可以参考类com.netflix.appinfo.PropertyBasedInstanceConfigConstants,也可研究seata源码中的discovery模块的seata-discovery-eureka工程

\n
\n

Q: 9.发生下面异常是啥原因? java.lang.NoSuchMethodError: com.fasterxml.jackson.databind.jsontype.TypeSerializer.typeId(Ljava/lang/Object;Lcom/fasterxml/jackson/core/JsonToken;)?

\n

A:\nundolog序列化配置为jackson时,jackson版本需要为2.9.9+

\n
\n

Q: 10.为什么mybatis没有返回自增ID?

\n

A:\n需要修改mybatis的配置: 在@Options(useGeneratedKeys = true, keyProperty = "id")或者在xml中指定useGeneratedKeys 和 keyProperty属性

\n
\n", - "link": "/zh-cn/docs/overview/faq.html", - "meta": { - "title": "Seata常见问题", - "keywords": "Seata", - "description": "Seata 常见问题。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/overview/terminology.html b/zh-cn/docs/overview/terminology.html deleted file mode 100644 index 10d13770..00000000 --- a/zh-cn/docs/overview/terminology.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - Seata术语 - - - - -
文档

Seata术语

-

TC - 事务协调者

-

维护全局和分支事务的状态,驱动全局事务提交或回滚。

-

TM - 事务管理器

-

定义全局事务的范围:开始全局事务、提交或回滚全局事务。

-

RM - 资源管理器

-

管理分支事务处理的资源,与TC交谈以注册分支事务和报告分支事务的状态,并驱动分支事务提交或回滚。

-
- - - - - - - diff --git a/zh-cn/docs/overview/terminology.json b/zh-cn/docs/overview/terminology.json deleted file mode 100644 index ae841e11..00000000 --- a/zh-cn/docs/overview/terminology.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "terminology.md", - "__html": "

Seata术语

\n

TC - 事务协调者

\n

维护全局和分支事务的状态,驱动全局事务提交或回滚。

\n

TM - 事务管理器

\n

定义全局事务的范围:开始全局事务、提交或回滚全局事务。

\n

RM - 资源管理器

\n

管理分支事务处理的资源,与TC交谈以注册分支事务和报告分支事务的状态,并驱动分支事务提交或回滚。

\n", - "link": "/zh-cn/docs/overview/terminology.html", - "meta": { - "title": "Seata术语", - "keywords": "Seata", - "description": "Seata术语。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/overview/what-is-seata.html b/zh-cn/docs/overview/what-is-seata.html deleted file mode 100644 index e4874077..00000000 --- a/zh-cn/docs/overview/what-is-seata.html +++ /dev/null @@ -1,321 +0,0 @@ - - - - - - - - - - Seata 是什么 - - - - -
文档

Seata 是什么?

-

Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

-

AT 模式

-

前提

-
    -
  • 基于支持本地 ACID 事务的关系型数据库。
  • -
  • Java 应用,通过 JDBC 访问数据库。
  • -
-

整体机制

-

两阶段提交协议的演变:

-
    -
  • -

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    -
  • -
  • -

    二阶段:

    -
      -
    • 提交异步化,非常快速地完成。
    • -
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • -
    -
  • -
-

写隔离

-
    -
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • -
  • 拿不到 全局锁 ,不能提交本地事务。
  • -
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • -
-

以一个示例来说明:

-

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

-

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。 -tx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

-

Write-Isolation: Commit

-

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

-

Write-Isolation: Rollback

-

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

-

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

-

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

-

读隔离

-

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

-

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

-

Read Isolation: SELECT FOR UPDATE

-

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

-

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

-
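To illustrate the two read paths, here is a minimal sketch assuming a Seata-proxied datasource, a Spring service and a MyBatis mapper; the account table and mapper are illustrative.

import io.seata.spring.annotation.GlobalTransactional;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import org.springframework.stereotype.Service;

@Service
public class AccountQueryService {

    private final AccountMapper accountMapper;

    public AccountQueryService(AccountMapper accountMapper) {
        this.accountMapper = accountMapper;
    }

    @GlobalTransactional
    public int readBalance(long id) {
        // Plain SELECT: not proxied, so it may observe data written by a branch of another
        // global transaction that has locally committed but not yet globally committed.
        int maybeDirty = accountMapper.selectBalance(id);

        // SELECT ... FOR UPDATE: proxied by Seata, waits for the global lock on the row,
        // so the value returned here is globally committed.
        int committed = accountMapper.selectBalanceForUpdate(id);

        return committed;
    }

    public interface AccountMapper {
        @Select("select balance from account where id = #{id}")
        int selectBalance(@Param("id") long id);

        @Select("select balance from account where id = #{id} for update")
        int selectBalanceForUpdate(@Param("id") long id);
    }
}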

工作机制

-

以一个示例来说明整个 AT 分支的工作过程。

-

业务表:product

- - - - - - - - - - - - - - - - - - - - - - - - - -
Field | Type | Key
id | bigint(20) | PRI
name | varchar(100) |
since | varchar(100) |
-

AT 分支事务的业务逻辑:

-
update product set name = 'GTS' where name = 'TXC';
-
-

一阶段

-

过程:

-
    -
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. -
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

得到前镜像:

- - - - - - - - - - - - - - - -
id | name | since
1 | TXC | 2014
-
    -
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. -
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. -
-
select id, name, since from product where id = 1;
-
-

得到后镜像:

- - - - - - - - - - - - - - - -
id | name | since
1 | GTS | 2014
-
    -
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. -
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. -
  5. 将本地事务提交的结果上报给 TC。
  6. -
-

二阶段-回滚

-
    -
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. -
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. -
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. -
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. -
-

二阶段-提交

-
    -
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. -
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. -
-

附录

-

回滚日志表

-

UNDO_LOG Table:不同数据库在类型上会略有差别。

-

以 MySQL 为例:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field | Type
branch_id | bigint PK
xid | varchar(100)
context | varchar(128)
rollback_info | longblob
log_status | tinyint
log_created | datetime
log_modified | datetime
-
-- 注意此处0.7.0+ 增加字段 context
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

TCC 模式

-

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

-
    -
  • 一阶段 prepare 行为
  • -
  • 二阶段 commit 或 rollback 行为
  • -
-

Overview of a global transaction

-

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeManual (Branch) Transaction Mode.

-

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

-
    -
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • -
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • -
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • -
-

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

-
    -
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • -
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • -
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • -
-

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

-
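A minimal sketch of such a custom branch, using Seata's TCC annotations (io.seata.rm.tcc.api); the order-related names and semantics are illustrative.

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.BusinessActionContextParameter;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

public interface OrderTccAction {

    // Phase one (prepare): reserve the resource, e.g. freeze stock for the order.
    @TwoPhaseBusinessAction(name = "orderTccAction", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext context,
                    @BusinessActionContextParameter(paramName = "orderId") String orderId);

    // Phase two (commit): make the reservation permanent.
    boolean commit(BusinessActionContext context);

    // Phase two (rollback): release the reservation.
    boolean rollback(BusinessActionContext context);
}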

Saga 模式

-

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

-

Saga模式示意图

-

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

-

适用场景:

-
    -
  • 业务流程长、业务流程多
  • -
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口
  • -
-

优势:

-
    -
  • 一阶段提交本地事务,无锁,高性能
  • -
  • 事件驱动架构,参与者可异步执行,高吞吐
  • -
  • 补偿服务易于实现
  • -
-

缺点:

- -
- - - - - - - diff --git a/zh-cn/docs/overview/what-is-seata.json b/zh-cn/docs/overview/what-is-seata.json deleted file mode 100644 index c1e8f370..00000000 --- a/zh-cn/docs/overview/what-is-seata.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "what-is-seata.md", - "__html": "

Seata 是什么?

\n

Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

\n

AT 模式

\n

前提

\n
    \n
  • 基于支持本地 ACID 事务的关系型数据库。
  • \n
  • Java 应用,通过 JDBC 访问数据库。
  • \n
\n

整体机制

\n

两阶段提交协议的演变:

\n
    \n
  • \n

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    \n
  • \n
  • \n

    二阶段:

    \n
      \n
    • 提交异步化,非常快速地完成。
    • \n
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • \n
    \n
  • \n
\n

写隔离

\n
    \n
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • \n
  • 拿不到 全局锁 ,不能提交本地事务。
  • \n
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • \n
\n

以一个示例来说明:

\n

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

\n

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。\ntx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

\n

\"Write-Isolation:

\n

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

\n

\"Write-Isolation:

\n

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

\n

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

\n

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

\n

读隔离

\n

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

\n

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

\n

\"Read

\n

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

\n

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

\n

工作机制

\n

以一个示例来说明整个 AT 分支的工作过程。

\n

业务表:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

AT 分支事务的业务逻辑:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

一阶段

\n

过程:

\n
    \n
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. \n
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

得到前镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. \n
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. \n
\n
select id, name, since from product where id = 1`;\n
\n

得到后镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. \n
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. \n
  5. 将本地事务提交的结果上报给 TC。
  6. \n
\n

二阶段-回滚

\n
    \n
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. \n
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. \n
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. \n
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
    \n
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. \n
\n

二阶段-提交

\n
    \n
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. \n
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. \n
\n

附录

\n

回滚日志表

\n

UNDO_LOG Table:不同数据库在类型上会略有差别。

\n

以 MySQL 为例:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldType
branch_idbigint PK
xidvarchar(100)
contextvarchar(128)
rollback_infolongblob
log_statustinyint
log_createddatetime
log_modifieddatetime
\n
-- 注意此处0.7.0+ 增加字段 context\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n

TCC 模式

\n

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

\n
    \n
  • 一阶段 prepare 行为
  • \n
  • 二阶段 commit 或 rollback 行为
  • \n
\n

\"Overview

\n

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeManual (Branch) Transaction Mode.

\n

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

\n
    \n
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • \n
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • \n
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • \n
\n

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

\n
    \n
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • \n
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • \n
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • \n
\n

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

\n

Saga 模式

\n

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

\n

\"Saga模式示意图\"

\n

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

\n

适用场景:

\n
    \n
  • 业务流程长、业务流程多
  • \n
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口
  • \n
\n

优势:

\n
    \n
  • 一阶段提交本地事务,无锁,高性能
  • \n
  • 事件驱动架构,参与者可异步执行,高吞吐
  • \n
  • 补偿服务易于实现
  • \n
\n

缺点:

\n\n", - "link": "/zh-cn/docs/overview/what-is-seata.html", - "meta": { - "title": "Seata 是什么", - "keywords": "Seata", - "description": "Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/overview/what_is_seata.html b/zh-cn/docs/overview/what_is_seata.html deleted file mode 100644 index 1139e7b2..00000000 --- a/zh-cn/docs/overview/what_is_seata.html +++ /dev/null @@ -1,312 +0,0 @@ - - - - - - - - - - what_is_seata - - - - -
文档

Seata 是什么?

-

Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

-

AT 模式

-

前提

-
    -
  • 基于支持本地 ACID 事务的关系型数据库。
  • -
  • Java 应用,通过 JDBC 访问数据库。
  • -
-

整体机制

-

两阶段提交协议的演变:

-
    -
  • -

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    -
  • -
  • -

    二阶段:

    -
      -
    • 提交异步化,非常快速地完成。
    • -
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • -
    -
  • -
-

写隔离

-
    -
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • -
  • 拿不到 全局锁 ,不能提交本地事务。
  • -
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • -
-

以一个示例来说明:

-

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

-

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。 -tx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

-

Write-Isolation: Commit

-

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

-

Write-Isolation: Rollback

-

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

-

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

-

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

-

读隔离

-

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

-

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

-

Read Isolation: SELECT FOR UPDATE

-

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

-

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

-

工作机制

-

以一个示例来说明整个 AT 分支的工作过程。

-

业务表:product

- - - - - - - - - - - - - - - - - - - - - - - - - -
Field | Type | Key
id | bigint(20) | PRI
name | varchar(100) |
since | varchar(100) |
-

AT 分支事务的业务逻辑:

-
update product set name = 'GTS' where name = 'TXC';
-
-

一阶段

-

过程:

-
    -
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. -
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. -
-
select id, name, since from product where name = 'TXC';
-
-

得到前镜像:

- - - - - - - - - - - - - - - -
id | name | since
1 | TXC | 2014
-
    -
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. -
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. -
-
select id, name, since from product where id = 1;
-
-

得到后镜像:

- - - - - - - - - - - - - - - -
id | name | since
1 | GTS | 2014
-
    -
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. -
-
{
-	"branchId": 641789253,
-	"undoItems": [{
-		"afterImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "GTS"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"beforeImage": {
-			"rows": [{
-				"fields": [{
-					"name": "id",
-					"type": 4,
-					"value": 1
-				}, {
-					"name": "name",
-					"type": 12,
-					"value": "TXC"
-				}, {
-					"name": "since",
-					"type": 12,
-					"value": "2014"
-				}]
-			}],
-			"tableName": "product"
-		},
-		"sqlType": "UPDATE"
-	}],
-	"xid": "xid:xxx"
-}
-
-
    -
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. -
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. -
  5. 将本地事务提交的结果上报给 TC。
  6. -
-

二阶段-回滚

-
    -
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. -
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. -
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. -
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. -
-
update product set name = 'TXC' where id = 1;
-
-
    -
  1. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
  2. -
-

二阶段-提交

-
    -
  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. -
  3. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
  4. -
-

附录

-

回滚日志表

-

UNDO_LOG Table:不同数据库在类型上会略有差别。

-

以 MySQL 为例:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field | Type
branch_id | bigint PK
xid | varchar(100)
context | varchar(128)
rollback_info | longblob
log_status | tinyint
log_created | datetime
log_modified | datetime
-
-- 注意此处0.7.0+ 增加字段 context
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

TCC 模式

-

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

-
    -
  • 一阶段 prepare 行为
  • -
  • 二阶段 commit 或 rollback 行为
  • -
-

Overview of a global transaction

-

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction ModeManual (Branch) Transaction Mode.

-

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务关系型数据库

-
    -
  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • -
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • -
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。
  • -
-

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

-
    -
  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • -
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • -
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。
  • -
-

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

-

Saga 模式

-

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

-

Saga模式示意图

-

理论基础:Hector & Kenneth 发表论⽂ Sagas (1987)

-

适用场景:

-
    -
  • 业务流程长、业务流程多
  • -
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口
  • -
-

优势:

-
    -
  • 一阶段提交本地事务,无锁,高性能
  • -
  • 事件驱动架构,参与者可异步执行,高吞吐
  • -
  • 补偿服务易于实现
  • -
-

缺点:

- -
- - - - - - diff --git a/zh-cn/docs/overview/what_is_seata.json b/zh-cn/docs/overview/what_is_seata.json deleted file mode 100644 index 89b0b663..00000000 --- a/zh-cn/docs/overview/what_is_seata.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "what_is_seata.md", - "__html": "

Seata 是什么?

\n

Seata 是一款开源的分布式事务解决方案,致力于提供高性能和简单易用的分布式事务服务。Seata 将为用户提供了 AT、TCC、SAGA 和 XA 事务模式,为用户打造一站式的分布式解决方案。

\n

AT 模式

\n

前提

\n
    \n
  • 基于支持本地 ACID 事务的关系型数据库。
  • \n
  • Java 应用,通过 JDBC 访问数据库。
  • \n
\n

整体机制

\n

两阶段提交协议的演变:

\n
    \n
  • \n

    一阶段:业务数据和回滚日志记录在同一个本地事务中提交,释放本地锁和连接资源。

    \n
  • \n
  • \n

    二阶段:

    \n
      \n
    • 提交异步化,非常快速地完成。
    • \n
    • 回滚通过一阶段的回滚日志进行反向补偿。
    • \n
    \n
  • \n
\n

写隔离

\n
    \n
  • 一阶段本地事务提交前,需要确保先拿到 全局锁
  • \n
  • 拿不到 全局锁 ,不能提交本地事务。
  • \n
  • 全局锁 的尝试被限制在一定范围内,超出范围将放弃,并回滚本地事务,释放本地锁。
  • \n
\n

以一个示例来说明:

\n

两个全局事务 tx1 和 tx2,分别对 a 表的 m 字段进行更新操作,m 的初始值 1000。

\n

tx1 先开始,开启本地事务,拿到本地锁,更新操作 m = 1000 - 100 = 900。本地事务提交前,先拿到该记录的 全局锁 ,本地提交释放本地锁。\ntx2 后开始,开启本地事务,拿到本地锁,更新操作 m = 900 - 100 = 800。本地事务提交前,尝试拿该记录的 全局锁 ,tx1 全局提交前,该记录的全局锁被 tx1 持有,tx2 需要重试等待 全局锁

\n

\"Write-Isolation:

\n

tx1 二阶段全局提交,释放 全局锁 。tx2 拿到 全局锁 提交本地事务。

\n

\"Write-Isolation:

\n

如果 tx1 的二阶段全局回滚,则 tx1 需要重新获取该数据的本地锁,进行反向补偿的更新操作,实现分支的回滚。

\n

此时,如果 tx2 仍在等待该数据的 全局锁,同时持有本地锁,则 tx1 的分支回滚会失败。分支的回滚会一直重试,直到 tx2 的 全局锁 等锁超时,放弃 全局锁 并回滚本地事务释放本地锁,tx1 的分支回滚最终成功。

\n

因为整个过程 全局锁 在 tx1 结束前一直是被 tx1 持有的,所以不会发生 脏写 的问题。

\n

读隔离

\n

在数据库本地事务隔离级别 读已提交(Read Committed) 或以上的基础上,Seata(AT 模式)的默认全局隔离级别是 读未提交(Read Uncommitted)

\n

如果应用在特定场景下,必需要求全局的 读已提交 ,目前 Seata 的方式是通过 SELECT FOR UPDATE 语句的代理。

\n

\"Read

\n

SELECT FOR UPDATE 语句的执行会申请 全局锁 ,如果 全局锁 被其他事务持有,则释放本地锁(回滚 SELECT FOR UPDATE 语句的本地执行)并重试。这个过程中,查询是被 block 住的,直到 全局锁 拿到,即读取的相关数据是 已提交 的,才返回。

\n

出于总体性能上的考虑,Seata 目前的方案并没有对所有 SELECT 语句都进行代理,仅针对 FOR UPDATE 的 SELECT 语句。

\n

工作机制

\n

以一个示例来说明整个 AT 分支的工作过程。

\n

业务表:product

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
FieldTypeKey
idbigint(20)PRI
namevarchar(100)
sincevarchar(100)
\n

AT 分支事务的业务逻辑:

\n
update product set name = 'GTS' where name = 'TXC';\n
\n

一阶段

\n

过程:

\n
    \n
  1. 解析 SQL:得到 SQL 的类型(UPDATE),表(product),条件(where name = 'TXC')等相关的信息。
  2. \n
  3. 查询前镜像:根据解析得到的条件信息,生成查询语句,定位数据。
  4. \n
\n
select id, name, since from product where name = 'TXC';\n
\n

得到前镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1TXC2014
\n
    \n
  1. 执行业务 SQL:更新这条记录的 name 为 'GTS'。
  2. \n
  3. 查询后镜像:根据前镜像的结果,通过 主键 定位数据。
  4. \n
\n
select id, name, since from product where id = 1`;\n
\n

得到后镜像:

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
idnamesince
1GTS2014
\n
    \n
  1. 插入回滚日志:把前后镜像数据以及业务 SQL 相关的信息组成一条回滚日志记录,插入到 UNDO_LOG 表中。
  2. \n
\n
{\n\t\"branchId\": 641789253,\n\t\"undoItems\": [{\n\t\t\"afterImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"GTS\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"beforeImage\": {\n\t\t\t\"rows\": [{\n\t\t\t\t\"fields\": [{\n\t\t\t\t\t\"name\": \"id\",\n\t\t\t\t\t\"type\": 4,\n\t\t\t\t\t\"value\": 1\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"name\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"TXC\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\": \"since\",\n\t\t\t\t\t\"type\": 12,\n\t\t\t\t\t\"value\": \"2014\"\n\t\t\t\t}]\n\t\t\t}],\n\t\t\t\"tableName\": \"product\"\n\t\t},\n\t\t\"sqlType\": \"UPDATE\"\n\t}],\n\t\"xid\": \"xid:xxx\"\n}\n
\n
    \n
  1. 提交前,向 TC 注册分支:申请 product 表中,主键值等于 1 的记录的 全局锁
  2. \n
  3. 本地事务提交:业务数据的更新和前面步骤中生成的 UNDO LOG 一并提交。
  4. \n
  5. 将本地事务提交的结果上报给 TC。
  6. \n
\n

二阶段-回滚

\n
    \n
  1. 收到 TC 的分支回滚请求,开启一个本地事务,执行如下操作。
  2. \n
  3. 通过 XID 和 Branch ID 查找到相应的 UNDO LOG 记录。
  4. \n
  5. 数据校验:拿 UNDO LOG 中的后镜与当前数据进行比较,如果有不同,说明数据被当前全局事务之外的动作做了修改。这种情况,需要根据配置策略来做处理,详细的说明在另外的文档中介绍。
  6. \n
  7. 根据 UNDO LOG 中的前镜像和业务 SQL 的相关信息生成并执行回滚的语句:
  8. \n
\n
update product set name = 'TXC' where id = 1;\n
\n
  5. 提交本地事务。并把本地事务的执行结果(即分支事务回滚的结果)上报给 TC。
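For this concrete example, the validation plus compensation could look like the following sketch. It is illustrative only (Seata generates the check and the compensating SQL generically from the undo log); the class name and the hard-coded values come from the tables above.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public final class Phase2RollbackSketch {

    public static boolean rollbackProduct(Connection conn) throws SQLException {
        // Step 3: compare the current row with the after image before compensating.
        try (PreparedStatement q = conn.prepareStatement("select name from product where id = 1");
             ResultSet rs = q.executeQuery()) {
            if (!rs.next() || !"GTS".equals(rs.getString("name"))) {
                // Row no longer matches the after image: it was changed outside this
                // global transaction, so it must be handled per the configured policy.
                return false;
            }
        }
        // Step 4: apply the compensating UPDATE generated from the before image.
        try (PreparedStatement u = conn.prepareStatement("update product set name = 'TXC' where id = 1")) {
            u.executeUpdate();
        }
        return true;
    }
}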

二阶段-提交

  1. 收到 TC 的分支提交请求,把请求放入一个异步任务的队列中,马上返回提交成功的结果给 TC。
  2. 异步任务阶段的分支提交请求将异步和批量地删除相应 UNDO LOG 记录。
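A minimal sketch of the asynchronous cleanup described above. It is not Seata's real worker (the real implementation additionally batches the deletes and retries on failure); the class name, the single-threaded executor, and the per-branch delete are simplifications.

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class AsyncCommitSketch {

    private static final ExecutorService WORKER = Executors.newSingleThreadExecutor();

    // The branch-commit handler only enqueues the cleanup and returns "committed"
    // to the TC immediately; the undo log row is removed later by the worker.
    public static void onBranchCommit(DataSource dataSource, String xid, long branchId) {
        WORKER.submit(() -> {
            try (Connection conn = dataSource.getConnection();
                 PreparedStatement ps =
                     conn.prepareStatement("delete from undo_log where xid = ? and branch_id = ?")) {
                ps.setString(1, xid);
                ps.setLong(2, branchId);
                ps.executeUpdate();
            } catch (Exception e) {
                e.printStackTrace(); // a real implementation would retry and log properly
            }
        });
    }
}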

附录

\n

回滚日志表

\n

UNDO_LOG Table:不同数据库在类型上会略有差别。

\n

以 MySQL 为例:

Field         | Type
branch_id     | bigint PK
xid           | varchar(100)
context       | varchar(128)
rollback_info | longblob
log_status    | tinyint
log_created   | datetime
log_modified  | datetime
-- 注意此处0.7.0+ 增加字段 context
CREATE TABLE `undo_log` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT,
  `branch_id` bigint(20) NOT NULL,
  `xid` varchar(100) NOT NULL,
  `context` varchar(128) NOT NULL,
  `rollback_info` longblob NOT NULL,
  `log_status` int(11) NOT NULL,
  `log_created` datetime NOT NULL,
  `log_modified` datetime NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
\n

TCC 模式

\n

回顾总览中的描述:一个分布式的全局事务,整体是 两阶段提交 的模型。全局事务是由若干分支事务组成的,分支事务要满足 两阶段提交 的模型要求,即需要每个分支事务都具备自己的:

  • 一阶段 prepare 行为
  • 二阶段 commit 或 rollback 行为

\"Overview

\n

根据两阶段行为模式的不同,我们将分支事务划分为 Automatic (Branch) Transaction Mode 和 Manual (Branch) Transaction Mode。

\n

AT 模式(参考链接 TBD)基于 支持本地 ACID 事务 的 关系型数据库:

  • 一阶段 prepare 行为:在本地事务中,一并提交业务数据更新和相应回滚日志记录。
  • 二阶段 commit 行为:马上成功结束,自动 异步批量清理回滚日志。
  • 二阶段 rollback 行为:通过回滚日志,自动 生成补偿操作,完成数据回滚。

相应的,TCC 模式,不依赖于底层数据资源的事务支持:

  • 一阶段 prepare 行为:调用 自定义 的 prepare 逻辑。
  • 二阶段 commit 行为:调用 自定义 的 commit 逻辑。
  • 二阶段 rollback 行为:调用 自定义 的 rollback 逻辑。

所谓 TCC 模式,是指支持把 自定义 的分支事务纳入到全局事务的管理中。

\n
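For reference, a minimal TCC participant might be declared as follows. This sketch assumes the annotations in io.seata.rm.tcc.api that ship with current Seata releases; the interface and method names are illustrative, and the concrete prepare/commit/rollback logic is left to the business implementation.

import io.seata.rm.tcc.api.BusinessActionContext;
import io.seata.rm.tcc.api.LocalTCC;
import io.seata.rm.tcc.api.TwoPhaseBusinessAction;

// One custom prepare method plus custom commit/rollback methods, wired together
// by the TCC annotations so the branch joins the global transaction.
@LocalTCC
public interface OrderTccAction {

    @TwoPhaseBusinessAction(name = "orderTccAction", commitMethod = "commit", rollbackMethod = "rollback")
    boolean prepare(BusinessActionContext ctx, String orderId);

    boolean commit(BusinessActionContext ctx);

    boolean rollback(BusinessActionContext ctx);
}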

Saga 模式

\n

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

\n

\"Saga模式示意图\"

\n

理论基础:Hector & Kenneth 发表论文 Sagas (1987)

\n

适用场景:

  • 业务流程长、业务流程多
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口

优势:

  • 一阶段提交本地事务,无锁,高性能
  • 事件驱动架构,参与者可异步执行,高吞吐
  • 补偿服务易于实现

缺点:

\n\n", - "link": "/zh-cn/docs/overview/what_is_seata.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/user/api.html b/zh-cn/docs/user/api.html deleted file mode 100644 index d3c6a007..00000000 --- a/zh-cn/docs/user/api.html +++ /dev/null @@ -1,247 +0,0 @@ - - - - - - - - - - Seata api - - - - -
文档

1. 概述

-

Seata API 分为两大类:High-Level API 和 Low-Level API :

-
    -
  • High-Level API :用于事务边界定义、控制及事务状态查询。
  • -
  • Low-Level API :用于控制事务上下文的传播。
  • -
-

2. High-Level API

-

2.1 GlobalTransaction

-

全局事务:包括开启事务、提交、回滚、获取当前状态等方法。

-
public interface GlobalTransaction {
-
-    /**
-     * 开启一个全局事务(使用默认的事务名和超时时间)
-     */
-    void begin() throws TransactionException;
-
-    /**
-     * 开启一个全局事务,并指定超时时间(使用默认的事务名)
-     */
-    void begin(int timeout) throws TransactionException;
-
-    /**
-     * 开启一个全局事务,并指定事务名和超时时间
-     */
-    void begin(int timeout, String name) throws TransactionException;
-
-    /**
-     * 全局提交
-     */
-    void commit() throws TransactionException;
-
-    /**
-     * 全局回滚
-     */
-    void rollback() throws TransactionException;
-
-    /**
-     * 获取事务的当前状态
-     */
-    GlobalStatus getStatus() throws TransactionException;
-
-    /**
-     * 获取事务的 XID
-     */
-    String getXid();
-
-}
-
-
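A hedged usage sketch of the interface above, combined with GlobalTransactionContext from the next section. The io.seata.tm.api package names follow current releases and may differ in older versions; the class and transaction names are illustrative.

import io.seata.tm.api.GlobalTransaction;
import io.seata.tm.api.GlobalTransactionContext;

public class ManualGlobalTx {

    public void doBusiness() throws Exception {
        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
        tx.begin(60000, "my-business");   // start the global transaction
        try {
            // ... call the branch services here ...
            tx.commit();                  // phase 2: global commit
        } catch (Exception e) {
            tx.rollback();                // phase 2: global rollback
            throw e;
        }
    }
}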

2.2 GlobalTransactionContext

-

GlobalTransaction 实例的获取需要通过 GlobalTransactionContext:

-

-    /**
-     * 获取当前的全局事务实例,如果没有则创建一个新的实例。
-     */
-    public static GlobalTransaction getCurrentOrCreate() {
-        GlobalTransaction tx = getCurrent();
-        if (tx == null) {
-            return createNew();
-        }
-        return tx;
-    }
-
-    /**
-     * 重新载入给定 XID 的全局事务实例,这个实例不允许执行开启事务的操作。
-     * 这个 API 通常用于失败的事务的后续集中处理。
-     * 比如:全局提交超时,后续集中处理通过重新载入该实例,通过实例方法获取事务当前状态,并根据状态判断是否需要重试全局提交操作。
-     */
-    public static GlobalTransaction reload(String xid) throws TransactionException {
-        GlobalTransaction tx = new DefaultGlobalTransaction(xid, GlobalStatus.UnKnown, GlobalTransactionRole.Launcher) {
-            @Override
-            public void begin(int timeout, String name) throws TransactionException {
-                throw new IllegalStateException("Never BEGIN on a RELOADED GlobalTransaction. ");
-            }
-        };
-        return tx;
-    }
-
-

2.3 TransactionalTemplate

-

事务化模板:通过上述 GlobalTransaction 和 GlobalTransactionContext API 把一个业务服务的调用包装成带有分布式事务支持的服务。

-
public class TransactionalTemplate {
-
-    public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {
-
-        // 1. 获取当前全局事务实例或创建新的实例
-        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
-
-        // 2. 开启全局事务
-        try {
-            tx.begin(business.timeout(), business.name());
-
-        } catch (TransactionException txe) {
-            // 2.1 开启失败
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.BeginFailure);
-
-        }
-
-        Object rs = null;
-        try {
-            // 3. 调用业务服务
-            rs = business.execute();
-
-        } catch (Throwable ex) {
-
-            // 业务调用本身的异常
-            try {
-                // 全局回滚
-                tx.rollback();
-
-                // 3.1 全局回滚成功:抛出原始业务异常
-                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);
-
-            } catch (TransactionException txe) {
-                // 3.2 全局回滚失败:
-                throw new TransactionalExecutor.ExecutionException(tx, txe,
-                    TransactionalExecutor.Code.RollbackFailure, ex);
-
-            }
-
-        }
-
-        // 4. 全局提交
-        try {
-            tx.commit();
-
-        } catch (TransactionException txe) {
-            // 4.1 全局提交失败:
-            throw new TransactionalExecutor.ExecutionException(tx, txe,
-                TransactionalExecutor.Code.CommitFailure);
-
-        }
-        return rs;
-    }
-
-}
-
-

模板方法执行的异常:ExecutionException

-
    class ExecutionException extends Exception {
-
-        // 发生异常的事务实例
-        private GlobalTransaction transaction;
-
-        // 异常编码:
-        // BeginFailure(开启事务失败)
-        // CommitFailure(全局提交失败)
-        // RollbackFailure(全局回滚失败)
-        // RollbackDone(全局回滚成功)
-        private Code code;
-
-        // 触发回滚的业务原始异常
-        private Throwable originalException;
-
-

外层调用逻辑 try-catch 这个异常,根据异常编码进行处理:

-
    -
  • BeginFailure (开启事务失败):getCause() 得到开启事务失败的框架异常,getOriginalException() 为空。
  • -
  • CommitFailure (全局提交失败):getCause() 得到全局提交失败的框架异常,getOriginalException() 为空。
  • -
  • RollbackFailure (全局回滚失败):getCause() 得到全局回滚失败的框架异常,getOriginalException() 业务应用的原始异常。
  • -
  • RollbackDone (全局回滚成功):getCause() 为空,getOriginalException() 业务应用的原始异常。
  • -
-
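An illustrative caller-side dispatch on the codes above. It assumes the usual getters (getCode(), getCause(), getOriginalException()) for the fields shown in the snippet, and that TransactionalTemplate and TransactionalExecutor live in io.seata.tm.api; adjust to the Seata version actually in use.

import io.seata.tm.api.TransactionalExecutor;
import io.seata.tm.api.TransactionalTemplate;

public class TemplateCaller {

    public Object callWithTemplate(TransactionalTemplate template, TransactionalExecutor business) throws Throwable {
        try {
            return template.execute(business);
        } catch (TransactionalExecutor.ExecutionException e) {
            switch (e.getCode()) {
                case RollbackDone:
                    // business call failed and the global rollback succeeded:
                    // surface the original business exception
                    throw e.getOriginalException();
                case RollbackFailure:
                    // both the business call and the global rollback failed
                    throw e.getCause();
                case BeginFailure:
                case CommitFailure:
                default:
                    // framework failure before or after the business call
                    throw e.getCause();
            }
        }
    }
}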

3. Low-Level API

-

3.1 RootContext

-

事务的根上下文:负责在应用的运行时,维护 XID 。

-
    /**
-     * 得到当前应用运行时的全局事务 XID
-     */
-    public static String getXID() {
-        return CONTEXT_HOLDER.get(KEY_XID);
-    }
-
-    /**
-     * 将全局事务 XID 绑定到当前应用的运行时中
-     */
-    public static void bind(String xid) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("bind " + xid);
-        }
-        CONTEXT_HOLDER.put(KEY_XID, xid);
-    }
-
-    /**
-     * 将全局事务 XID 从当前应用的运行时中解除绑定,同时将 XID 返回
-     */
-    public static String unbind() {
-        String xid = CONTEXT_HOLDER.remove(KEY_XID);
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("unbind " + xid);
-        }
-        return xid;
-    }
-
-    /**
-     * 判断当前应用的运行时是否处于全局事务的上下文中
-     */
-    public static boolean inGlobalTransaction() {
-        return CONTEXT_HOLDER.get(KEY_XID) != null;
-    }
-
-

High-Level API 的实现都是基于 RootContext 中维护的 XID 来做的。

-

应用的当前运行的操作是否在一个全局事务的上下文中,就是看 RootContext 中是否有 XID。

-

RootContext 的默认实现是基于 ThreadLocal 的,即 XID 保存在当前线程上下文中。

-

Low-Level API 的两个典型的应用场景:

-

1. 远程调用事务上下文的传播

-

远程调用前获取当前 XID:

-
String xid = RootContext.getXID();
-
-

远程调用过程把 XID 也传递到服务提供方,在执行服务提供方的业务逻辑前,把 XID 绑定到当前应用的运行时:

-
RootContext.bind(rpcXid);
-
-

2. 事务的暂停和恢复

-

在一个全局事务中,如果需要某些业务逻辑不在全局事务的管辖范围内,则在调用前,把 XID 解绑:

-
String unbindXid = RootContext.unbind();
-
-

待相关业务逻辑执行完成,再把 XID 绑定回去,即可实现全局事务的恢复:

-
RootContext.bind(unbindXid);
-
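To make the suspend/resume pair robust, the unbind and re-bind are typically wrapped in try/finally, for example (sketch only; the helper class is illustrative):

import io.seata.core.context.RootContext;

public final class OutsideGlobalTx {

    public static void run(Runnable businessOutsideGlobalTx) {
        String xid = RootContext.unbind();   // suspend the global transaction context
        try {
            businessOutsideGlobalTx.run();   // runs outside the global transaction
        } finally {
            if (xid != null) {
                RootContext.bind(xid);       // resume the global transaction context
            }
        }
    }
}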
-
- - - - - - - diff --git a/zh-cn/docs/user/api.json b/zh-cn/docs/user/api.json deleted file mode 100644 index 573506ce..00000000 --- a/zh-cn/docs/user/api.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "api.md", - "__html": "

1. 概述

\n

Seata API 分为两大类:High-Level API 和 Low-Level API :

\n
    \n
  • High-Level API :用于事务边界定义、控制及事务状态查询。
  • \n
  • Low-Level API :用于控制事务上下文的传播。
  • \n
\n

2. High-Level API

\n

2.1 GlobalTransaction

\n

全局事务:包括开启事务、提交、回滚、获取当前状态等方法。

\n
public interface GlobalTransaction {\n\n    /**\n     * 开启一个全局事务(使用默认的事务名和超时时间)\n     */\n    void begin() throws TransactionException;\n\n    /**\n     * 开启一个全局事务,并指定超时时间(使用默认的事务名)\n     */\n    void begin(int timeout) throws TransactionException;\n\n    /**\n     * 开启一个全局事务,并指定事务名和超时时间\n     */\n    void begin(int timeout, String name) throws TransactionException;\n\n    /**\n     * 全局提交\n     */\n    void commit() throws TransactionException;\n\n    /**\n     * 全局回滚\n     */\n    void rollback() throws TransactionException;\n\n    /**\n     * 获取事务的当前状态\n     */\n    GlobalStatus getStatus() throws TransactionException;\n\n    /**\n     * 获取事务的 XID\n     */\n    String getXid();\n\n}\n
\n

2.2 GlobalTransactionContext

\n

GlobalTransaction 实例的获取需要通过 GlobalTransactionContext:

\n
\n    /**\n     * 获取当前的全局事务实例,如果没有则创建一个新的实例。\n     */\n    public static GlobalTransaction getCurrentOrCreate() {\n        GlobalTransaction tx = getCurrent();\n        if (tx == null) {\n            return createNew();\n        }\n        return tx;\n    }\n\n    /**\n     * 重新载入给定 XID 的全局事务实例,这个实例不允许执行开启事务的操作。\n     * 这个 API 通常用于失败的事务的后续集中处理。\n     * 比如:全局提交超时,后续集中处理通过重新载入该实例,通过实例方法获取事务当前状态,并根据状态判断是否需要重试全局提交操作。\n     */\n    public static GlobalTransaction reload(String xid) throws TransactionException {\n        GlobalTransaction tx = new DefaultGlobalTransaction(xid, GlobalStatus.UnKnown, GlobalTransactionRole.Launcher) {\n            @Override\n            public void begin(int timeout, String name) throws TransactionException {\n                throw new IllegalStateException(\"Never BEGIN on a RELOADED GlobalTransaction. \");\n            }\n        };\n        return tx;\n    }\n
\n

2.3 TransactionalTemplate

\n

事务化模板:通过上述 GlobalTransaction 和 GlobalTransactionContext API 把一个业务服务的调用包装成带有分布式事务支持的服务。

\n
public class TransactionalTemplate {\n\n    public Object execute(TransactionalExecutor business) throws TransactionalExecutor.ExecutionException {\n\n        // 1. 获取当前全局事务实例或创建新的实例\n        GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();\n\n        // 2. 开启全局事务\n        try {\n            tx.begin(business.timeout(), business.name());\n\n        } catch (TransactionException txe) {\n            // 2.1 开启失败\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.BeginFailure);\n\n        }\n\n        Object rs = null;\n        try {\n            // 3. 调用业务服务\n            rs = business.execute();\n\n        } catch (Throwable ex) {\n\n            // 业务调用本身的异常\n            try {\n                // 全局回滚\n                tx.rollback();\n\n                // 3.1 全局回滚成功:抛出原始业务异常\n                throw new TransactionalExecutor.ExecutionException(tx, TransactionalExecutor.Code.RollbackDone, ex);\n\n            } catch (TransactionException txe) {\n                // 3.2 全局回滚失败:\n                throw new TransactionalExecutor.ExecutionException(tx, txe,\n                    TransactionalExecutor.Code.RollbackFailure, ex);\n\n            }\n\n        }\n\n        // 4. 全局提交\n        try {\n            tx.commit();\n\n        } catch (TransactionException txe) {\n            // 4.1 全局提交失败:\n            throw new TransactionalExecutor.ExecutionException(tx, txe,\n                TransactionalExecutor.Code.CommitFailure);\n\n        }\n        return rs;\n    }\n\n}\n
\n

模板方法执行的异常:ExecutionException

\n
    class ExecutionException extends Exception {\n\n        // 发生异常的事务实例\n        private GlobalTransaction transaction;\n\n        // 异常编码:\n        // BeginFailure(开启事务失败)\n        // CommitFailure(全局提交失败)\n        // RollbackFailure(全局回滚失败)\n        // RollbackDone(全局回滚成功)\n        private Code code;\n\n        // 触发回滚的业务原始异常\n        private Throwable originalException;\n
\n

外层调用逻辑 try-catch 这个异常,根据异常编码进行处理:

\n
    \n
  • BeginFailure (开启事务失败):getCause() 得到开启事务失败的框架异常,getOriginalException() 为空。
  • \n
  • CommitFailure (全局提交失败):getCause() 得到全局提交失败的框架异常,getOriginalException() 为空。
  • \n
  • RollbackFailure (全局回滚失败):getCause() 得到全局回滚失败的框架异常,getOriginalException() 业务应用的原始异常。
  • \n
  • RollbackDone (全局回滚成功):getCause() 为空,getOriginalException() 业务应用的原始异常。
  • \n
\n

3. Low-Level API

\n

3.1 RootContext

\n

事务的根上下文:负责在应用的运行时,维护 XID 。

\n
    /**\n     * 得到当前应用运行时的全局事务 XID\n     */\n    public static String getXID() {\n        return CONTEXT_HOLDER.get(KEY_XID);\n    }\n\n    /**\n     * 将全局事务 XID 绑定到当前应用的运行时中\n     */\n    public static void bind(String xid) {\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"bind \" + xid);\n        }\n        CONTEXT_HOLDER.put(KEY_XID, xid);\n    }\n\n    /**\n     * 将全局事务 XID 从当前应用的运行时中解除绑定,同时将 XID 返回\n     */\n    public static String unbind() {\n        String xid = CONTEXT_HOLDER.remove(KEY_XID);\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"unbind \" + xid);\n        }\n        return xid;\n    }\n\n    /**\n     * 判断当前应用的运行时是否处于全局事务的上下文中\n     */\n    public static boolean inGlobalTransaction() {\n        return CONTEXT_HOLDER.get(KEY_XID) != null;\n    }\n
\n

High-Level API 的实现都是基于 RootContext 中维护的 XID 来做的。

\n

应用的当前运行的操作是否在一个全局事务的上下文中,就是看 RootContext 中是否有 XID。

\n

RootContext 的默认实现是基于 ThreadLocal 的,即 XID 保存在当前线程上下文中。

\n

Low-Level API 的两个典型的应用场景:

\n

1. 远程调用事务上下文的传播

\n

远程调用前获取当前 XID:

\n
String xid = RootContext.getXID();\n
\n

远程调用过程把 XID 也传递到服务提供方,在执行服务提供方的业务逻辑前,把 XID 绑定到当前应用的运行时:

\n
RootContext.bind(rpcXid);\n
\n

2. 事务的暂停和恢复

\n

在一个全局事务中,如果需要某些业务逻辑不在全局事务的管辖范围内,则在调用前,把 XID 解绑:

\n
String unbindXid = RootContext.unbind();\n
\n

待相关业务逻辑执行完成,再把 XID 绑定回去,即可实现全局事务的恢复:

\n
RootContext.bind(unbindXid);\n
\n", - "link": "/zh-cn/docs/user/api.html", - "meta": { - "title": "Seata api", - "keywords": "Seata", - "description": "Seata API 分为两大类:High-Level API 和 Low-Level API。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/configurations-old.html b/zh-cn/docs/user/configurations-old.html deleted file mode 100644 index a2a82c06..00000000 --- a/zh-cn/docs/user/configurations-old.html +++ /dev/null @@ -1,349 +0,0 @@ - - - - - - - - - - configurations-old - - - - -
文档

seata参数配置 0.9.0版本

-

公共部分

- - - - - - - - - - - - - - - - - - - - -
keydescremark
transport.serializationclient和server通信编解码方式seata、protobuf
transport.heartbeatclient和server通信心跳检测开关默认true开启
-

server端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
transaction.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
transaction.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
service.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
service.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
-

client端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keyroledescremark
service.vgroup_mapping.my_test_tx_groupTM,RM事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTM,RMTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransactionTM,RM全局事务开关默认false。false为开启,true为关闭
service.enableDegradeTM降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.async.commit.buffer.limitRM异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.lock.retry.internalRM校验或占用全局锁重试间隔默认10,单位毫秒
client.lock.retry.timesRM校验或占用全局锁重试次数默认30
client.lock.retry.policy.branch-rollback-on-conflictRM分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.report.retry.countTM,RM一阶段结果上报TC重试次数默认5次
client.tm.commit.retry.countTM一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.countTM一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.table.meta.check.enableRM自动刷新缓存中的表结构默认true
transaction.undo.data.validationRM二阶段回滚镜像校验默认true开启,false关闭
transaction.undo.log.serializationRMundo序列化方式默认jackson
transaction.undo.log.tableRM自定义undo表名默认undo_log
support.spring.datasource.autoproxyRM数据源自动代理开关默认false关闭
-

未使用

- - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
-

附录1:

-
事务分组说明。
-1.事务分组是什么?
-事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。
-2.通过事务分组如何找到后端集群?
-首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。
-3.为什么这么设计,不直接取服务名?
-这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。
-
-

附录2:

-
关于grouplist问题说明下。
-1. 什么时候会用到file.conf中的default.grouplist?
-当registry.type=file时会用到,其他时候不读。
-2. default.grouplist的值列表是否可以配置多个?
-可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据
-3. 是否推荐使用default.grouplist?
-不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    
-
-

附录3:

-
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。
-假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。
-
- - - - - - - diff --git a/zh-cn/docs/user/configurations-old.json b/zh-cn/docs/user/configurations-old.json deleted file mode 100644 index d9fbc6da..00000000 --- a/zh-cn/docs/user/configurations-old.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filename": "configurations-old.md", - "__html": "

seata参数配置 0.9.0版本

\n

公共部分

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
transport.serializationclient和server通信编解码方式seata、protobuf
transport.heartbeatclient和server通信心跳检测开关默认true开启
\n

server端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
transaction.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
transaction.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
service.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
service.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
\n

client端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keyroledescremark
service.vgroup_mapping.my_test_tx_groupTM,RM事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTM,RMTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransactionTM,RM全局事务开关默认false。false为开启,true为关闭
service.enableDegradeTM降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.async.commit.buffer.limitRM异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.lock.retry.internalRM校验或占用全局锁重试间隔默认10,单位毫秒
client.lock.retry.timesRM校验或占用全局锁重试次数默认30
client.lock.retry.policy.branch-rollback-on-conflictRM分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.report.retry.countTM,RM一阶段结果上报TC重试次数默认5次
client.tm.commit.retry.countTM一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.countTM一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.table.meta.check.enableRM自动刷新缓存中的表结构默认true
transaction.undo.data.validationRM二阶段回滚镜像校验默认true开启,false关闭
transaction.undo.log.serializationRMundo序列化方式默认jackson
transaction.undo.log.tableRM自定义undo表名默认undo_log
support.spring.datasource.autoproxyRM数据源自动代理开关默认false关闭
\n

未使用

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
\n

附录1:

\n
事务分组说明。\n1.事务分组是什么?\n事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。\n2.通过事务分组如何找到后端集群?\n首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。\n3.为什么这么设计,不直接取服务名?\n这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。\n
\n

附录2:

\n
关于grouplist问题说明下。\n1. 什么时候会用到file.conf中的default.grouplist?\n当registry.type=file时会用到,其他时候不读。\n2. default.grouplist的值列表是否可以配置多个?\n可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据\n3. 是否推荐使用default.grouplist?\n不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    \n
\n

附录3:

\n
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。\n假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。
\n", - "link": "/zh-cn/docs/user/configurations-old.html", - "meta": {} -} \ No newline at end of file diff --git a/zh-cn/docs/user/configurations.html b/zh-cn/docs/user/configurations.html deleted file mode 100644 index 546261e0..00000000 --- a/zh-cn/docs/user/configurations.html +++ /dev/null @@ -1,356 +0,0 @@ - - - - - - - - - - Seata 参数配置 - - - - -
文档

seata参数配置 1.0.0版本

-

0.9.0.1之前版本 点击这里

-

变更记录

-
20191205: 增加seata.enabled、client.rm.report.success.enable
-
-

公共部分

- - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
transport.serializationclient和server通信编解码方式seata(ByteBuf)、protobuf、kryo、hession,默认seata
transport.compressorclient和server通信数据压缩方式none、gzip,默认none
transport.heartbeatclient和server通信心跳检测开关默认true开启
-

server端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
server.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
server.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
server.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
server.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
server.recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
server.recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
server.recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
server.recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
-

client端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
seata.enabled是否开启spring-boot自动装配true、false,默认true(附录4)
client.rm.report.success.enable是否上报一阶段成功true、false,默认true用于保持分支事务生命周期记录完整,false可提高不少性能
service.vgroup_mapping.my_test_tx_group事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransaction全局事务开关默认false。false为开启,true为关闭
service.enableDegrade降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.rm.async.commit.buffer.limit异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.rm.lock.retry.internal校验或占用全局锁重试间隔默认10,单位毫秒
client.rm.lock.retry.times校验或占用全局锁重试次数默认30
client.rm.lock.retry.policy.branch-rollback-on-conflict分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.rm.report.retry.count一阶段结果上报TC重试次数默认5次
client.rm.table.meta.check.enable自动刷新缓存中的表结构默认true
client.tm.commit.retry.count一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.count一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.undo.data.validation二阶段回滚镜像校验默认true开启,false关闭
client.undo.log.serializationundo序列化方式默认jackson
client.undo.log.table自定义undo表名默认undo_log
client.support.spring.datasource.autoproxy数据源自动代理开关默认false关闭
-

未使用

- - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
-

附录1:

-
事务分组说明。
-1.事务分组是什么?
-事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。
-2.通过事务分组如何找到后端集群?
-首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。
-3.为什么这么设计,不直接取服务名?
-这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。
-
-

附录2:

-
关于grouplist问题说明下。
-1. 什么时候会用到file.conf中的default.grouplist?
-当registry.type=file时会用到,其他时候不读。
-2. default.grouplist的值列表是否可以配置多个?
-可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据
-3. 是否推荐使用default.grouplist?
-不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    
-
-

附录3:

-
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。
-假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。
-
-

附录4:

-
是否开启spring-boot自动装配,如果开启,则会自动配置seata与spring-boot的集成,包括数据源的自动代理以及GlobalTransactionScanner初始化。
-注:1.0版本新特性,需依赖seata-spring-boot-starter。
-
-
- - - - - - - diff --git a/zh-cn/docs/user/configurations.json b/zh-cn/docs/user/configurations.json deleted file mode 100644 index 497a3e6b..00000000 --- a/zh-cn/docs/user/configurations.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "configurations.md", - "__html": "

seata参数配置 1.0.0版本

\n

0.9.0.1之前版本 点击这里

\n

变更记录

\n
20191205: 增加seata.enabled、client.rm.report.success.enable\n
\n

公共部分

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
transport.serializationclient和server通信编解码方式seata(ByteBuf)、protobuf、kryo、hession,默认seata
transport.compressorclient和server通信数据压缩方式none、gzip,默认none
transport.heartbeatclient和server通信心跳检测开关默认true开启
\n

server端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
server.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
server.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
server.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
server.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
server.recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
server.recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
server.recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
server.recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
\n

client端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
seata.enabled是否开启spring-boot自动装配true、false,默认true(附录4)
client.rm.report.success.enable是否上报一阶段成功true、false,默认true用于保持分支事务生命周期记录完整,false可提高不少性能
service.vgroup_mapping.my_test_tx_group事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransaction全局事务开关默认false。false为开启,true为关闭
service.enableDegrade降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.rm.async.commit.buffer.limit异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.rm.lock.retry.internal校验或占用全局锁重试间隔默认10,单位毫秒
client.rm.lock.retry.times校验或占用全局锁重试次数默认30
client.rm.lock.retry.policy.branch-rollback-on-conflict分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.rm.report.retry.count一阶段结果上报TC重试次数默认5次
client.rm.table.meta.check.enable自动刷新缓存中的表结构默认true
client.tm.commit.retry.count一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.count一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.undo.data.validation二阶段回滚镜像校验默认true开启,false关闭
client.undo.log.serializationundo序列化方式默认jackson
client.undo.log.table自定义undo表名默认undo_log
client.support.spring.datasource.autoproxy数据源自动代理开关默认false关闭
\n

未使用

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
\n

附录1:

\n
事务分组说明。\n1.事务分组是什么?\n事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。\n2.通过事务分组如何找到后端集群?\n首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。\n3.为什么这么设计,不直接取服务名?\n这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。\n
\n

附录2:

\n
关于grouplist问题说明下。\n1. 什么时候会用到file.conf中的default.grouplist?\n当registry.type=file时会用到,其他时候不读。\n2. default.grouplist的值列表是否可以配置多个?\n可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据\n3. 是否推荐使用default.grouplist?\n不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    \n
\n

附录3:

\n
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。\n假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。\n
\n

附录4:

\n
是否开启spring-boot自动装配,如果开启,则会自动配置seata与spring-boot的集成,包括数据源的自动代理以及GlobalTransactionScanner初始化。\n注:1.0版本新特性,需依赖seata-spring-boot-starter。\n
\n", - "link": "/zh-cn/docs/user/configurations.html", - "meta": { - "title": "Seata 参数配置", - "keywords": "Seata", - "description": "Seata 参数配置。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/configurations090.html b/zh-cn/docs/user/configurations090.html deleted file mode 100644 index cd022398..00000000 --- a/zh-cn/docs/user/configurations090.html +++ /dev/null @@ -1,349 +0,0 @@ - - - - - - - - - - Seata 参数配置 0.9.0版本 - - - - -
文档

seata参数配置 0.9.0版本

-

公共部分

- - - - - - - - - - - - - - - - - - - - -
keydescremark
transport.serializationclient和server通信编解码方式seata、protobuf
transport.heartbeatclient和server通信心跳检测开关默认true开启
-

server端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
transaction.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
transaction.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
service.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
service.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
-

client端

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
keyroledescremark
service.vgroup_mapping.my_test_tx_groupTM,RM事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTM,RMTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransactionTM,RM全局事务开关默认false。false为开启,true为关闭
service.enableDegradeTM降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.async.commit.buffer.limitRM异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.lock.retry.internalRM校验或占用全局锁重试间隔默认10,单位毫秒
client.lock.retry.timesRM校验或占用全局锁重试次数默认30
client.lock.retry.policy.branch-rollback-on-conflictRM分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.report.retry.countTM,RM一阶段结果上报TC重试次数默认5次
client.tm.commit.retry.countTM一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.countTM一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.table.meta.check.enableRM自动刷新缓存中的表结构默认true
transaction.undo.data.validationRM二阶段回滚镜像校验默认true开启,false关闭
transaction.undo.log.serializationRMundo序列化方式默认jackson
transaction.undo.log.tableRM自定义undo表名默认undo_log
support.spring.datasource.autoproxyRM数据源自动代理开关默认false关闭
-

未使用

- - - - - - - - - - - - - - - - - - - - - - - - - -
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
-

附录1:

-
事务分组说明。
-1.事务分组是什么?
-事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。
-2.通过事务分组如何找到后端集群?
-首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。
-3.为什么这么设计,不直接取服务名?
-这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。
-
-

附录2:

-
关于grouplist问题说明下。
-1. 什么时候会用到file.conf中的default.grouplist?
-当registry.type=file时会用到,其他时候不读。
-2. default.grouplist的值列表是否可以配置多个?
-可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据
-3. 是否推荐使用default.grouplist?
-不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    
-
-

附录3:

-
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。
-假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。
-
- - - - - - - diff --git a/zh-cn/docs/user/configurations090.json b/zh-cn/docs/user/configurations090.json deleted file mode 100644 index 59ad4537..00000000 --- a/zh-cn/docs/user/configurations090.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "configurations090.md", - "__html": "

seata参数配置 0.9.0版本

\n

公共部分

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
transport.serializationclient和server通信编解码方式seata、protobuf
transport.heartbeatclient和server通信心跳检测开关默认true开启
\n

server端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
transaction.undo.log.save.daysundo保留天数默认7天,log_status=1(附录3)和未正常清理的undo
transaction.undo.log.delete.periodundo清理线程间隔时间默认86400000,单位毫秒
service.max.commit.retry.timeout二阶段提交重试超时时长单位毫秒,默认-1表示无限重试。公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
service.max.rollback.retry.timeout二阶段回滚重试超时时长同commit
recovery.committing-retry-period二阶段提交未完成状态全局事务重试提交线程间隔时间默认1000,单位毫秒
recovery.asyn-committing-retry-period二阶段异步提交状态重试提交线程间隔时间默认1000,单位毫秒
recovery.rollbacking-retry-period二阶段回滚状态重试回滚线程间隔时间默认1000,单位毫秒
recovery.timeout-retry-period超时状态检测重试线程间隔时间默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
store.mode事务会话信息存储方式file本地文件(不支持HA),db数据库(支持HA)
store.file.dirfile模式文件存储文件夹名默认sessionStore
store.db.datasourcedb模式数据源类型默认dbcp
store.db.db-typedb模式数据库类型默认mysql
store.db.driver-class-namedb模式数据库驱动默认com.mysql.jdbc.Driver
store.db.urldb模式数据源库url默认jdbc:mysql://127.0.0.1:3306/seata
store.db.userdb模式数据库账户默认mysql
store.db.min-conndb模式数据库初始连接数默认1
store.db.max-conndb模式数据库最大连接数默认3
store.db.global.tabledb模式全局事务表名默认global_table
store.db.branch.tabledb模式分支事务表名默认branch_table
store.db.lock-tabledb模式全局锁表名默认lock_table
store.db.query-limitdb模式查询全局事务一次的最大条数默认1000
metrics.enabled是否启用Metrics默认false关闭,在False状态下,所有与Metrics相关的组件将不会被初始化,使得性能损耗最低
metrics.registry-type指标注册器类型Metrics使用的指标注册器类型,默认为内置的compact(简易)实现,这个实现中的Meter仅使用有限内存计数,性能高足够满足大多数场景;目前只能设置一个指标注册器实现
metrics.exporter-list指标结果Measurement数据输出器列表默认prometheus,多个输出器使用英文逗号分割,例如"prometheus,jmx",目前仅实现了对接prometheus的输出器
metrics.exporter-prometheus-portprometheus输出器Client端口号默认9898
\n

client端

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keyroledescremark
service.vgroup_mapping.my_test_tx_groupTM,RM事务群组(附录1)my_test_tx_group为分组,配置项值为TC集群名
service.default.grouplistTM,RMTC服务列表(附录2)仅注册中心为file时使用
service.disableGlobalTransactionTM,RM全局事务开关默认false。false为开启,true为关闭
service.enableDegradeTM降级开关(待实现)默认false。业务侧根据连续错误数自动降级不走seata事务
client.async.commit.buffer.limitRM异步提交缓存队列长度默认10000。 二阶段提交成功,RM异步清理undo队列
client.lock.retry.internalRM校验或占用全局锁重试间隔默认10,单位毫秒
client.lock.retry.timesRM校验或占用全局锁重试次数默认30
client.lock.retry.policy.branch-rollback-on-conflictRM分支事务与其它全局回滚事务冲突时锁策略默认true,优先释放本地锁让回滚成功
client.report.retry.countTM,RM一阶段结果上报TC重试次数默认5次
client.tm.commit.retry.countTM一阶段全局提交结果上报TC重试次数默认1次,建议大于1
client.tm.rollback.retry.countTM一阶段全局回滚结果上报TC重试次数默认1次,建议大于1
client.table.meta.check.enableRM自动刷新缓存中的表结构默认true
transaction.undo.data.validationRM二阶段回滚镜像校验默认true开启,false关闭
transaction.undo.log.serializationRMundo序列化方式默认jackson
transaction.undo.log.tableRM自定义undo表名默认undo_log
support.spring.datasource.autoproxyRM数据源自动代理开关默认false关闭
\n

未使用

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
keydescremark
lock.mode锁存储方式local、remote
lock.local
lock.remote
\n

附录1:

\n
事务分组说明。\n1.事务分组是什么?\n事务分组是seata的资源逻辑,类似于服务实例。在file.conf中的my_test_tx_group就是一个事务分组。\n2.通过事务分组如何找到后端集群?\n首先程序中配置了事务分组(GlobalTransactionScanner 构造方法的txServiceGroup参数),程序会通过用户配置的配置中心去寻找service.vgroup_mapping.事务分组配置项,取得配置项的值就是TC集群的名称。拿到集群名称程序通过一定的前后缀+集群名称去构造服务名,各配置中心的服务名实现不同。拿到服务名去相应的注册中心去拉取相应服务名的服务列表,获得后端真实的TC服务列表。\n3.为什么这么设计,不直接取服务名?\n这里多了一层获取事务分组到映射集群的配置。这样设计后,事务分组可以作为资源的逻辑隔离单位,当发生故障时可以快速failover。\n
\n

附录2:

\n
关于grouplist问题说明下。\n1. 什么时候会用到file.conf中的default.grouplist?\n当registry.type=file时会用到,其他时候不读。\n2. default.grouplist的值列表是否可以配置多个?\n可以配置多个,配置多个意味着集群,但当store.mode=file时,会报错。原因是在file存储模式下未提供本地文件的同步,所以需要使用store.mode=db,通过db来共享TC集群间数据\n3. 是否推荐使用default.grouplist?\n不推荐,如问题1,当registry.type=file时会用到,也就是说这里用的不是真正的注册中心,不具体服务的健康检查机制当tc不可用时无法自动剔除列表,推荐使用nacos 、eureka、redis、zk、consul、etcd3、sofa。registry.type=file或config.type=file 设计的初衷是让用户再不依赖第三方注册中心或配置中心的前提下,通过直连的方式,快速验证seata服务。    \n
\n

附录3:

\n
log_status=1的是防御性的,是收到全局回滚请求,但是不确定某个事务分支的本地事务是否已经执行完成了,这时事先插入一条branchid相同的数据,插入的假数据成功了,本地事务继续执行就会报主键冲突自动回滚。\n假如插入不成功说明表里有数据这个本地事务已经执行完成了,那么取出这条undolog数据做反向回滚操作。
\n", - "link": "/zh-cn/docs/user/configurations090.html", - "meta": { - "title": "Seata 参数配置 0.9.0版本", - "keywords": "Seata", - "description": "Seata 参数配置 0.9.0版本。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/datasource.html b/zh-cn/docs/user/datasource.html deleted file mode 100644 index 993dfd54..00000000 --- a/zh-cn/docs/user/datasource.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - Seata 数据源支持 - - - - -
文档

数据源支持

-

TBD

-
- - - - - - - diff --git a/zh-cn/docs/user/datasource.json b/zh-cn/docs/user/datasource.json deleted file mode 100644 index a807313a..00000000 --- a/zh-cn/docs/user/datasource.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "datasource.md", - "__html": "

数据源支持

\n

TBD

\n", - "link": "/zh-cn/docs/user/datasource.html", - "meta": { - "title": "Seata 数据源支持", - "keywords": "Seata", - "description": "Seata 数据源支持。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/microservice.html b/zh-cn/docs/user/microservice.html deleted file mode 100644 index 4fd0ec3a..00000000 --- a/zh-cn/docs/user/microservice.html +++ /dev/null @@ -1,148 +0,0 @@ - - - - - - - - - - Seata 微服务框架支持 - - - - -
文档

事务上下文

-

Seata 的事务上下文由 RootContext 来管理。

-

应用开启一个全局事务后,RootContext 会自动绑定该事务的 XID,事务结束(提交或回滚完成),RootContext 会自动解绑 XID。

-
// 绑定 XID
-RootContext.bind(xid);
-
-// 解绑 XID
-String xid = RootContext.unbind();
-
-

应用可以通过 RootContext 的 API 接口来获取当前运行时的全局事务 XID。

-
// 获取 XID
-String xid = RootContext.getXID();
-
-

应用是否运行在一个全局事务的上下文中,就是通过 RootContext 是否绑定 XID 来判定的。

-
    public static boolean inGlobalTransaction() {
-        return CONTEXT_HOLDER.get(KEY_XID) != null;
-    }
-
-

事务传播

-

Seata 全局事务的传播机制就是指事务上下文的传播,根本上,就是 XID 的应用运行时的传播方式。

-

1. 服务内部的事务传播

-

默认的,RootContext 的实现是基于 ThreadLocal 的,即 XID 绑定在当前线程上下文中。

-
public class ThreadLocalContextCore implements ContextCore {
-
-    private ThreadLocal<Map<String, String>> threadLocal = new ThreadLocal<Map<String, String>>() {
-        @Override
-        protected Map<String, String> initialValue() {
-            return new HashMap<String, String>();
-        }
-
-    };
-
-    @Override
-    public String put(String key, String value) {
-        return threadLocal.get().put(key, value);
-    }
-
-    @Override
-    public String get(String key) {
-        return threadLocal.get().get(key);
-    }
-
-    @Override
-    public String remove(String key) {
-        return threadLocal.get().remove(key);
-    }
-}
-
-

所以服务内部的 XID 传播通常是天然的通过同一个线程的调用链路串连起来的。默认不做任何处理,事务的上下文就是传播下去的。

-

如果希望挂起事务上下文,则需要通过 RootContext 提供的 API 来实现:

-
// 挂起(暂停)
-String xid = RootContext.unbind();
-
-// TODO: 运行在全局事务外的业务逻辑
-
-// 恢复全局事务上下文
-RootContext.bind(xid);
-
-
-

2. 跨服务调用的事务传播

-

通过上述基本原理,我们可以很容易理解:

-
-

跨服务调用场景下的事务传播,本质上就是要把 XID 通过服务调用传递到服务提供方,并绑定到 RootContext 中去。

-
-

只要能做到这点,理论上 Seata 可以支持任意的微服务框架。

-
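As a hypothetical example for a framework without built-in support, the XID could be carried in an HTTP header and bound on the provider side, mirroring the Dubbo filter explained below. The filter class and the header-propagation scheme are assumptions rather than a built-in Seata integration (the caller must add the RootContext.KEY_XID header symmetrically), and Servlet 4.0 default methods for init/destroy are assumed.

import io.seata.core.context.RootContext;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;

public class XidPropagationFilter implements Filter {

    @Override
    public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
            throws IOException, ServletException {
        String rpcXid = ((HttpServletRequest) req).getHeader(RootContext.KEY_XID);
        boolean bound = false;
        if (RootContext.getXID() == null && rpcXid != null) {
            RootContext.bind(rpcXid);     // provider side: join the caller's global transaction
            bound = true;
        }
        try {
            chain.doFilter(req, resp);
        } finally {
            if (bound) {
                RootContext.unbind();     // clean up once the request has been handled
            }
        }
    }
}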

对 Dubbo 支持的解读

-

下面,我们通过内置的对 Dubbo RPC 支持机制的解读,来说明 Seata 在实现对一个特定微服务框架支持的机制。

-

对 Dubbo 的支持,我们利用了 Dubbo 框架的 org.apache.dubbo.rpc.Filter 机制。

-
/**
- * The type Transaction propagation filter.
- */
-@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)
-public class TransactionPropagationFilter implements Filter {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);
-
-    @Override
-    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
-        String xid = RootContext.getXID(); // 获取当前事务 XID
-        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID); // 获取 RPC 调用传递过来的 XID
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("xid in RootContext[" + xid + "] xid in RpcContext[" + rpcXid + "]");
-        }
-        boolean bind = false;
-        if (xid != null) { // Consumer:把 XID 置入 RPC 的 attachment 中
-            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);
-        } else {
-            if (rpcXid != null) { // Provider:把 RPC 调用传递来的 XID 绑定到当前运行时
-                RootContext.bind(rpcXid);
-                bind = true;
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("bind[" + rpcXid + "] to RootContext");
-                }
-            }
-        }
-        try {
-            return invoker.invoke(invocation); // 业务方法的调用
-
-        } finally {
-            if (bind) { // Provider:调用完成后,对 XID 的清理
-                String unbindXid = RootContext.unbind();
-                if (LOGGER.isDebugEnabled()) {
-                    LOGGER.debug("unbind[" + unbindXid + "] from RootContext");
-                }
-                if (!rpcXid.equalsIgnoreCase(unbindXid)) {
-                    LOGGER.warn("xid in change during RPC from " + rpcXid + " to " + unbindXid);
-                    if (unbindXid != null) { // 调用过程有新的事务上下文开启,则不能清除
-                        RootContext.bind(unbindXid);
-                        LOGGER.warn("bind [" + unbindXid + "] back to RootContext");
-                    }
-                }
-            }
-        }
-    }
-}
-
-
- - - - - - - diff --git a/zh-cn/docs/user/microservice.json b/zh-cn/docs/user/microservice.json deleted file mode 100644 index 62611606..00000000 --- a/zh-cn/docs/user/microservice.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "microservice.md", - "__html": "

事务上下文

\n

Seata 的事务上下文由 RootContext 来管理。

\n

应用开启一个全局事务后,RootContext 会自动绑定该事务的 XID,事务结束(提交或回滚完成),RootContext 会自动解绑 XID。

\n
// 绑定 XID\nRootContext.bind(xid);\n\n// 解绑 XID\nString xid = RootContext.unbind();\n
\n

应用可以通过 RootContext 的 API 接口来获取当前运行时的全局事务 XID。

\n
// 获取 XID\nString xid = RootContext.getXID();\n
\n

应用是否运行在一个全局事务的上下文中,就是通过 RootContext 是否绑定 XID 来判定的。

\n
    public static boolean inGlobalTransaction() {\n        return CONTEXT_HOLDER.get(KEY_XID) != null;\n    }\n
\n

事务传播

\n

Seata 全局事务的传播机制就是指事务上下文的传播,根本上,就是 XID 的应用运行时的传播方式。

\n

1. 服务内部的事务传播

\n

默认的,RootContext 的实现是基于 ThreadLocal 的,即 XID 绑定在当前线程上下文中。

\n
public class ThreadLocalContextCore implements ContextCore {\n\n    private ThreadLocal<Map<String, String>> threadLocal = new ThreadLocal<Map<String, String>>() {\n        @Override\n        protected Map<String, String> initialValue() {\n            return new HashMap<String, String>();\n        }\n\n    };\n\n    @Override\n    public String put(String key, String value) {\n        return threadLocal.get().put(key, value);\n    }\n\n    @Override\n    public String get(String key) {\n        return threadLocal.get().get(key);\n    }\n\n    @Override\n    public String remove(String key) {\n        return threadLocal.get().remove(key);\n    }\n}\n
\n

所以服务内部的 XID 传播通常是天然的通过同一个线程的调用链路串连起来的。默认不做任何处理,事务的上下文就是传播下去的。

\n

如果希望挂起事务上下文,则需要通过 RootContext 提供的 API 来实现:

\n
// 挂起(暂停)\nString xid = RootContext.unbind();\n\n// TODO: 运行在全局事务外的业务逻辑\n\n// 恢复全局事务上下文\nRootContext.bind(xid);\n\n
\n

2. 跨服务调用的事务传播

\n

通过上述基本原理,我们可以很容易理解:

\n
\n

跨服务调用场景下的事务传播,本质上就是要把 XID 通过服务调用传递到服务提供方,并绑定到 RootContext 中去。

\n
\n

只要能做到这点,理论上 Seata 可以支持任意的微服务框架。

\n

对 Dubbo 支持的解读

\n

下面,我们通过内置的对 Dubbo RPC 支持机制的解读,来说明 Seata 在实现对一个特定微服务框架支持的机制。

\n

对 Dubbo 的支持,我们利用了 Dubbo 框架的 org.apache.dubbo.rpc.Filter 机制。

\n
/**\n * The type Transaction propagation filter.\n */\n@Activate(group = { Constants.PROVIDER, Constants.CONSUMER }, order = 100)\npublic class TransactionPropagationFilter implements Filter {\n\n    private static final Logger LOGGER = LoggerFactory.getLogger(TransactionPropagationFilter.class);\n\n    @Override\n    public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {\n        String xid = RootContext.getXID(); // 获取当前事务 XID\n        String rpcXid = RpcContext.getContext().getAttachment(RootContext.KEY_XID); // 获取 RPC 调用传递过来的 XID\n        if (LOGGER.isDebugEnabled()) {\n            LOGGER.debug(\"xid in RootContext[\" + xid + \"] xid in RpcContext[\" + rpcXid + \"]\");\n        }\n        boolean bind = false;\n        if (xid != null) { // Consumer:把 XID 置入 RPC 的 attachment 中\n            RpcContext.getContext().setAttachment(RootContext.KEY_XID, xid);\n        } else {\n            if (rpcXid != null) { // Provider:把 RPC 调用传递来的 XID 绑定到当前运行时\n                RootContext.bind(rpcXid);\n                bind = true;\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"bind[\" + rpcXid + \"] to RootContext\");\n                }\n            }\n        }\n        try {\n            return invoker.invoke(invocation); // 业务方法的调用\n\n        } finally {\n            if (bind) { // Provider:调用完成后,对 XID 的清理\n                String unbindXid = RootContext.unbind();\n                if (LOGGER.isDebugEnabled()) {\n                    LOGGER.debug(\"unbind[\" + unbindXid + \"] from RootContext\");\n                }\n                if (!rpcXid.equalsIgnoreCase(unbindXid)) {\n                    LOGGER.warn(\"xid in change during RPC from \" + rpcXid + \" to \" + unbindXid);\n                    if (unbindXid != null) { // 调用过程有新的事务上下文开启,则不能清除\n                        RootContext.bind(unbindXid);\n                        LOGGER.warn(\"bind [\" + unbindXid + \"] back to RootContext\");\n                    }\n                }\n            }\n        }\n    }\n}\n
\n", - "link": "/zh-cn/docs/user/microservice.html", - "meta": { - "title": "Seata 微服务框架支持", - "keywords": "Seata", - "description": "Seata 微服务框架支持。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/ormframework.html b/zh-cn/docs/user/ormframework.html deleted file mode 100644 index 2631634f..00000000 --- a/zh-cn/docs/user/ormframework.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - Seata ORM框架支持 - - - - -
文档

ORM框架支持

-

TBD

-
- - - - - - - diff --git a/zh-cn/docs/user/ormframework.json b/zh-cn/docs/user/ormframework.json deleted file mode 100644 index a9285867..00000000 --- a/zh-cn/docs/user/ormframework.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "ormframework.md", - "__html": "

ORM框架支持

\n

TBD

\n", - "link": "/zh-cn/docs/user/ormframework.html", - "meta": { - "title": "Seata ORM框架支持", - "keywords": "Seata", - "description": "Seata ORM框架支持。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/quickstart.html b/zh-cn/docs/user/quickstart.html deleted file mode 100644 index f8188774..00000000 --- a/zh-cn/docs/user/quickstart.html +++ /dev/null @@ -1,212 +0,0 @@ - - - - - - - - - - Seata 快速开始 - - - - -
文档

快速开始

-

让我们从一个微服务示例开始。

-

用例

-

用户购买商品的业务逻辑。整个业务逻辑由3个微服务提供支持:

-
    -
  • 仓储服务:对给定的商品扣除仓储数量。
  • -
  • 订单服务:根据采购需求创建订单。
  • -
  • 帐户服务:从用户帐户中扣除余额。
  • -
-

架构图

-

Architecture

-

仓储服务

-
public interface StorageService {
-
-    /**
-     * 扣除存储数量
-     */
-    void deduct(String commodityCode, int count);
-}
-
-

订单服务

-
public interface OrderService {
-
-    /**
-     * 创建订单
-     */
-    Order create(String userId, String commodityCode, int orderCount);
-}
-
-

帐户服务

-
public interface AccountService {
-
-    /**
-     * 从用户账户中借出
-     */
-    void debit(String userId, int money);
-}
-
-

主要业务逻辑

-
public class BusinessServiceImpl implements BusinessService {
-
-    private StorageService storageService;
-
-    private OrderService orderService;
-
-    /**
-     * 采购
-     */
-    public void purchase(String userId, String commodityCode, int orderCount) {
-
-        storageService.deduct(commodityCode, orderCount);
-
-        orderService.create(userId, commodityCode, orderCount);
-    }
-}
-
-
public class OrderServiceImpl implements OrderService {
-
-    private OrderDAO orderDAO;
-
-    private AccountService accountService;
-
-    public Order create(String userId, String commodityCode, int orderCount) {
-
-        int orderMoney = calculate(commodityCode, orderCount);
-
-        accountService.debit(userId, orderMoney);
-
-        Order order = new Order();
-        order.userId = userId;
-        order.commodityCode = commodityCode;
-        order.count = orderCount;
-        order.money = orderMoney;
-
-        // INSERT INTO orders ...
-        return orderDAO.insert(order);
-    }
-}
-
-

SEATA 的分布式交易解决方案

-

-我们只需要使用一个 @GlobalTransactional 注解在业务方法上:

-

-    @GlobalTransactional
-    public void purchase(String userId, String commodityCode, int orderCount) {
-        ......
-    }
-
-
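For the annotation to take effect, a GlobalTransactionScanner has to be registered; the sample project does this via Spring XML. Below is a hedged Java-config equivalent, with illustrative applicationId and txServiceGroup values.

import io.seata.spring.annotation.GlobalTransactionScanner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class SeataConfig {

    @Bean
    public GlobalTransactionScanner globalTransactionScanner() {
        // applicationId and txServiceGroup are illustrative; the group must match
        // the service.vgroup_mapping.* configuration used by the TC cluster
        return new GlobalTransactionScanner("dubbo-demo-business", "my_test_tx_group");
    }
}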

由Dubbo + SEATA提供支持的示例

-

步骤 1:建立数据库

-
    -
  • 要求:具有InnoDB引擎的MySQL。
  • -
-

注意: 实际上,在示例用例中,这3个服务应该有3个数据库。 但是,为了简单起见,我们只能创建一个数据库并配置3个数据源。

-

使用您刚创建的数据库 URL/username/password 修改Spring XML。

-

dubbo-account-service.xml -dubbo-order-service.xml -dubbo-storage-service.xml

-
        <property name="url" value="jdbc:mysql://x.x.x.x:3306/xxx" />
-        <property name="username" value="xxx" />
-        <property name="password" value="xxx" />
-
-

步骤 2:创建 UNDO_LOG 表

-

SEATA AT 模式需要 UNDO_LOG

-
-- 注意此处0.3.0+ 增加唯一索引 ux_undo_log
-CREATE TABLE `undo_log` (
-  `id` bigint(20) NOT NULL AUTO_INCREMENT,
-  `branch_id` bigint(20) NOT NULL,
-  `xid` varchar(100) NOT NULL,
-  `context` varchar(128) NOT NULL,
-  `rollback_info` longblob NOT NULL,
-  `log_status` int(11) NOT NULL,
-  `log_created` datetime NOT NULL,
-  `log_modified` datetime NOT NULL,
-  `ext` varchar(100) DEFAULT NULL,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
-) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-
-

步骤 3:为示例业务创建表

-

-DROP TABLE IF EXISTS `storage_tbl`;
-CREATE TABLE `storage_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`),
-  UNIQUE KEY (`commodity_code`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `order_tbl`;
-CREATE TABLE `order_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `commodity_code` varchar(255) DEFAULT NULL,
-  `count` int(11) DEFAULT 0,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-
-DROP TABLE IF EXISTS `account_tbl`;
-CREATE TABLE `account_tbl` (
-  `id` int(11) NOT NULL AUTO_INCREMENT,
-  `user_id` varchar(255) DEFAULT NULL,
-  `money` int(11) DEFAULT 0,
-  PRIMARY KEY (`id`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-

步骤 4: 启动服务

- -
Usage: sh seata-server.sh(for linux and mac) or cmd seata-server.bat(for windows) [options]
-  Options:
-    --host, -h
-      The host to bind.
-      Default: 0.0.0.0
-    --port, -p
-      The port to listen.
-      Default: 8091
-    --storeMode, -m
-      log store mode : file、db
-      Default: file
-    --help
-
-e.g.
-
-sh seata-server.sh -p 8091 -h 127.0.0.1 -m file
-
-

步骤 5: 运行示例

-

示例仓库: seata-samples

-
    -
  • 启动 DubboAccountServiceStarter
  • -
  • 启动 DubboStorageServiceStarter
  • -
  • 启动 DubboOrderServiceStarter
  • -
  • 运行 DubboBusinessTester for demo test
  • -
-

TBD: 运行演示应用程序的脚本

-
- - - - - - - diff --git a/zh-cn/docs/user/quickstart.json b/zh-cn/docs/user/quickstart.json deleted file mode 100644 index 6afa0900..00000000 --- a/zh-cn/docs/user/quickstart.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "quickstart.md", - "__html": "

快速开始

\n

让我们从一个微服务示例开始。

\n

用例

\n

用户购买商品的业务逻辑。整个业务逻辑由3个微服务提供支持:

\n
    \n
  • 仓储服务:对给定的商品扣除仓储数量。
  • \n
  • 订单服务:根据采购需求创建订单。
  • \n
  • 帐户服务:从用户帐户中扣除余额。
  • \n
\n

架构图

\n

\"Architecture\"

\n

仓储服务

\n
public interface StorageService {\n\n    /**\n     * 扣除存储数量\n     */\n    void deduct(String commodityCode, int count);\n}\n
\n

订单服务

\n
public interface OrderService {\n\n    /**\n     * 创建订单\n     */\n    Order create(String userId, String commodityCode, int orderCount);\n}\n
\n

帐户服务

\n
public interface AccountService {\n\n    /**\n     * 从用户账户中借出\n     */\n    void debit(String userId, int money);\n}\n
\n

主要业务逻辑

\n
public class BusinessServiceImpl implements BusinessService {\n\n    private StorageService storageService;\n\n    private OrderService orderService;\n\n    /**\n     * 采购\n     */\n    public void purchase(String userId, String commodityCode, int orderCount) {\n\n        storageService.deduct(commodityCode, orderCount);\n\n        orderService.create(userId, commodityCode, orderCount);\n    }\n}\n
\n
public class OrderServiceImpl implements OrderService {\n\n    private OrderDAO orderDAO;\n\n    private AccountService accountService;\n\n    public Order create(String userId, String commodityCode, int orderCount) {\n\n        int orderMoney = calculate(commodityCode, orderCount);\n\n        accountService.debit(userId, orderMoney);\n\n        Order order = new Order();\n        order.userId = userId;\n        order.commodityCode = commodityCode;\n        order.count = orderCount;\n        order.money = orderMoney;\n\n        // INSERT INTO orders ...\n        return orderDAO.insert(order);\n    }\n}\n
\n

SEATA 的分布式事务解决方案

\n

\"\"\n我们只需要使用一个 @GlobalTransactional 注解在业务方法上:

\n
\n    @GlobalTransactional\n    public void purchase(String userId, String commodityCode, int orderCount) {\n        ......\n    }\n
\n

由Dubbo + SEATA提供支持的示例

\n

步骤 1:建立数据库

\n
    \n
  • 要求:具有InnoDB引擎的MySQL。
  • \n
\n

注意: 实际上,在示例用例中,这3个服务应该有3个数据库。但是,为了简单起见,我们只创建一个数据库并配置3个数据源。

\n

使用您刚创建的数据库 URL/username/password 修改Spring XML。

\n

dubbo-account-service.xml\ndubbo-order-service.xml\ndubbo-storage-service.xml

\n
        <property name=\"url\" value=\"jdbc:mysql://x.x.x.x:3306/xxx\" />\n        <property name=\"username\" value=\"xxx\" />\n        <property name=\"password\" value=\"xxx\" />\n
\n

步骤 2:创建 UNDO_LOG 表

\n

SEATA AT 模式需要 UNDO_LOG

\n
-- 注意此处0.3.0+ 增加唯一索引 ux_undo_log\nCREATE TABLE `undo_log` (\n  `id` bigint(20) NOT NULL AUTO_INCREMENT,\n  `branch_id` bigint(20) NOT NULL,\n  `xid` varchar(100) NOT NULL,\n  `context` varchar(128) NOT NULL,\n  `rollback_info` longblob NOT NULL,\n  `log_status` int(11) NOT NULL,\n  `log_created` datetime NOT NULL,\n  `log_modified` datetime NOT NULL,\n  `ext` varchar(100) DEFAULT NULL,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)\n) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;\n
\n

步骤 3:为示例业务创建表

\n
\nDROP TABLE IF EXISTS `storage_tbl`;\nCREATE TABLE `storage_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`),\n  UNIQUE KEY (`commodity_code`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `order_tbl`;\nCREATE TABLE `order_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `commodity_code` varchar(255) DEFAULT NULL,\n  `count` int(11) DEFAULT 0,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n\nDROP TABLE IF EXISTS `account_tbl`;\nCREATE TABLE `account_tbl` (\n  `id` int(11) NOT NULL AUTO_INCREMENT,\n  `user_id` varchar(255) DEFAULT NULL,\n  `money` int(11) DEFAULT 0,\n  PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n
\n

步骤 4: 启动服务

\n\n
Usage: sh seata-server.sh(for linux and mac) or cmd seata-server.bat(for windows) [options]\n  Options:\n    --host, -h\n      The host to bind.\n      Default: 0.0.0.0\n    --port, -p\n      The port to listen.\n      Default: 8091\n    --storeMode, -m\n      log store mode : file、db\n      Default: file\n    --help\n\ne.g.\n\nsh seata-server.sh -p 8091 -h 127.0.0.1 -m file\n
\n

步骤 5: 运行示例

\n

示例仓库: seata-samples

\n
    \n
  • 启动 DubboAccountServiceStarter
  • \n
  • 启动 DubboStorageServiceStarter
  • \n
  • 启动 DubboOrderServiceStarter
  • \n
  • 运行 DubboBusinessTester for demo test
  • \n
\n

TBD: 运行演示应用程序的脚本

\n", - "link": "/zh-cn/docs/user/quickstart.html", - "meta": { - "title": "Seata 快速开始", - "keywords": "Seata", - "description": "Seata 快速开始。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/saga.html b/zh-cn/docs/user/saga.html deleted file mode 100644 index 7f9dd547..00000000 --- a/zh-cn/docs/user/saga.html +++ /dev/null @@ -1,862 +0,0 @@ - - - - - - - - - - Seata Saga 模式 - - - - -
文档

SEATA Saga 模式

-

概述

-

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

-

Saga模式示意图

-

理论基础:Hector & Kenneth 发表论文 Sagas (1987)

-

适用场景:

-
    -
  • 业务流程长、业务流程多
  • -
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口
  • -
-

优势:

-
    -
  • 一阶段提交本地事务,无锁,高性能
  • -
  • 事件驱动架构,参与者可异步执行,高吞吐
  • -
  • 补偿服务易于实现
  • -
-

缺点:

-
    -
  • 不保证隔离性(应对方案见后面文档)
  • -
-

Saga的实现:

-

基于状态机引擎的 Saga 实现:

-

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

-
    -
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. -
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. -
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. -
-
-

注意: 异常发生时是否进行补偿也可由用户自定义决定

-
-
    -
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. -
-

示例状态图:

-

示例状态图

-

快速开始

-

Demo简介

-

基于dubbo构建的微服务下,使用Saga模式演示分布式事务的提交和回滚;

-

业务流程图如下图所示:

-

demo业务流程图

-

先下载seata-samples工程:https://github.com/seata/seata-samples.git

-
-

注意SEATA版本需要0.9.0以上

-
-

在dubbo-saga-sample中一个分布式事务内会有2个Saga事务参与者,分别是: InventoryAction 和 BalanceAction ;分布式事务提交则两者均提交,分布式事务回滚则两者均回滚;

-

这2个Saga参与者均是 dubbo 服务,两个参与者都有一个reduce方法,表示库存扣减或余额扣减,还有一个compensateReduce方法,表示补偿扣减操作。

-
    -
  • InventoryAction 接口定义如下:
  • -
-
public interface InventoryAction {
-
-    /**
-     * reduce
-     * @param businessKey
-     * @param amount
-     * @param params
-     * @return
-     */
-    boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params);
-
-    /**
-     * compensateReduce
-     * @param businessKey
-     * @param params
-     * @return
-     */
-    boolean compensateReduce(String businessKey, Map<String, Object> params);
-}
-
-
    -
  • 这个场景用状态语言定义就是下面的json:src/main/resources/statelang/reduce_inventory_and_balance.json
  • -
-
{
-    "Name": "reduceInventoryAndBalance",
-    "Comment": "reduce inventory then reduce balance in a transaction",
-    "StartState": "ReduceInventory",
-    "Version": "0.0.1",
-    "States": {
-        "ReduceInventory": {
-            "Type": "ServiceTask",
-            "ServiceName": "inventoryAction",
-            "ServiceMethod": "reduce",
-            "CompensateState": "CompensateReduceInventory",
-            "Next": "ChoiceState",
-            "Input": [
-                "$.[businessKey]",
-                "$.[count]"
-            ],
-            "Output": {
-                "reduceInventoryResult": "$.#root"
-            },
-            "Status": {
-                "#root == true": "SU",
-                "#root == false": "FA",
-                "$Exception{java.lang.Throwable}": "UN"
-            }
-        },
-        "ChoiceState":{
-            "Type": "Choice",
-            "Choices":[
-                {
-                    "Expression":"[reduceInventoryResult] == true",
-                    "Next":"ReduceBalance"
-                }
-            ],
-            "Default":"Fail"
-        },
-        "ReduceBalance": {
-            "Type": "ServiceTask",
-            "ServiceName": "balanceAction",
-            "ServiceMethod": "reduce",
-            "CompensateState": "CompensateReduceBalance",
-            "Input": [
-                "$.[businessKey]",
-                "$.[amount]",
-                {
-                    "throwException" : "$.[mockReduceBalanceFail]"
-                }
-            ],
-            "Output": {
-                "compensateReduceBalanceResult": "$.#root"
-            },
-            "Status": {
-                "#root == true": "SU",
-                "#root == false": "FA",
-                "$Exception{java.lang.Throwable}": "UN"
-            },
-            "Catch": [
-                {
-                    "Exceptions": [
-                        "java.lang.Throwable"
-                    ],
-                    "Next": "CompensationTrigger"
-                }
-            ],
-            "Next": "Succeed"
-        },
-        "CompensateReduceInventory": {
-            "Type": "ServiceTask",
-            "ServiceName": "inventoryAction",
-            "ServiceMethod": "compensateReduce",
-            "Input": [
-                "$.[businessKey]"
-            ]
-        },
-        "CompensateReduceBalance": {
-            "Type": "ServiceTask",
-            "ServiceName": "balanceAction",
-            "ServiceMethod": "compensateReduce",
-            "Input": [
-                "$.[businessKey]"
-            ]
-        },
-        "CompensationTrigger": {
-            "Type": "CompensationTrigger",
-            "Next": "Fail"
-        },
-        "Succeed": {
-            "Type":"Succeed"
-        },
-        "Fail": {
-            "Type":"Fail",
-            "ErrorCode": "PURCHASE_FAILED",
-            "Message": "purchase failed"
-        }
-    }
-}
-
-

该json表示的状态图:

-

该json表示的状态图

-

状态语言在一定程度上参考了AWS Step Functions

-

"状态机" 属性简介:

-
    -
  • Name: 表示状态机的名称,必须唯一
  • -
  • Comment: 状态机的描述
  • -
  • Version: 状态机定义版本
  • -
  • StartState: 启动时运行的第一个"状态"
  • -
  • States: 状态列表,是一个map结构,key是"状态"的名称,在状态机内必须唯一
  • -
-

"状态" 属性简介:

-
    -
  • Type: "状态" 的类型,比如有: -
      -
    • ServiceTask: 执行调用服务任务
    • -
    • Choice: 单条件选择路由
    • -
    • CompensationTrigger: 触发补偿流程
    • -
    • Succeed: 状态机正常结束
    • -
    • Fail: 状态机异常结束
    • -
    • SubStateMachine: 调用子状态机
    • -
    • CompensateSubMachine: 用于补偿一个子状态机
    • -
    -
  • -
  • ServiceName: 服务名称,通常是服务的beanId
  • -
  • ServiceMethod: 服务方法名称
  • -
  • CompensateState: 该"状态"的补偿"状态"
  • -
  • Input: 调用服务的输入参数列表, 是一个数组, 对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达式使用的是SpringEL, 如果是常量直接写值即可
  • -
  • Output: 将服务返回的参数赋值到状态机上下文中, 是一个map结构,key为放入到状态机上下文时的key(状态机上下文也是一个map),value中$.是表示SpringEL表达式,表示从服务的返回参数中取值,#root表示服务的整个返回参数
  • -
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知, 我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个map结构,key是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是SpringEL表达式判断服务返回参数,带$Exception{开头表示判断异常类型。value是当这个条件表达式成立时则将服务执行状态映射成这个值
  • -
  • Catch: 捕获到异常后的路由
  • -
  • Next: 服务执行完成后下一个执行的"状态"
  • -
  • Choices: Choice类型的"状态"里, 可选的分支列表, 分支中的Expression为SpringEL表达式, Next为当表达式成立时执行的下一个"状态"
  • -
  • ErrorCode: Fail类型"状态"的错误码
  • -
  • Message: Fail类型"状态"的错误信息
  • -
-

更多详细的状态语言解释请看State language reference章节

-

更多详细的状态语言使用示例见https://github.com/seata/seata/tree/develop/test/src/test/java/io/seata/saga/engine

-

Demo 运行指南

-

step 1 启动 SEATA Server

-

运行 SeataServerStarter ,启动 Seata Server;

-

step 2 启动 dubbo provider Demo

-

运行 DubboSagaProviderStarter ,启动 dubbo provider;

-

step 3 启动 Saga Demo

-

运行 DubboSagaTransactionStarter , 启动 demo工程;

-
-

Demo中的数据库使用的是H2内存数据库, 生产上建议使用与业务相同的库, 目前支持Oracle, Mysql, DB2. 建表语句在 https://github.com/seata/seata/tree/develop/saga/seata-saga-engine-store/src/main/resources/sql

-
-
-

Demo中还有调用本地服务和调用SOFA RPC服务的示例

-
-

状态机设计器

-

Seata Saga 提供了一个可视化的状态机设计器方便用户使用,代码和运行指南请参考: -https://github.com/seata/seata/tree/develop/saga/seata-saga-statemachine-designer

-

状态机设计器截图: -状态机设计器

-

状态机设计器演示地址:http://seata.io/saga_designer/index.html

-

最佳实践

-

Saga 服务设计的实践经验

-

允许空补偿

-
    -
  • 空补偿:原服务未执行,补偿服务执行了
  • -
  • 出现原因: -
      -
    • 原服务 超时(丢包)
    • -
    • Saga 事务触发 回滚
    • -
    • 未收到 原服务请求,先收到 补偿请求
    • -
    -
  • -
-

所以服务设计时需要允许空补偿, 即没有找到要补偿的业务主键时返回补偿成功并将原业务主键记录下来

-
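下面用一段示例代码说明空补偿的处理思路(InventoryFreeze、inventoryDAO、compensationLogDAO 均为假设的类型/对象,并非 seata-samples 中的实际代码):

public boolean compensateReduce(String businessKey, Map<String, Object> params) {
    InventoryFreeze freeze = inventoryDAO.findFreezeByBusinessKey(businessKey);
    if (freeze == null) {
        // 空补偿:没有找到原服务留下的业务记录,直接返回补偿成功,
        // 同时把该业务主键记录下来,供后面的防悬挂检查使用
        compensationLogDAO.recordEmptyCompensation(businessKey);
        return true;
    }
    // 正常补偿:释放原服务冻结的库存
    inventoryDAO.releaseFreeze(freeze);
    return true;
}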

防悬挂控制

-
    -
  • 悬挂:补偿服务 比 原服务 先执行
  • -
  • 出现原因: -
      -
    • 原服务 超时(拥堵)
    • -
    • Saga 事务触发 回滚
    • -
    • 拥堵的 原服务 到达
    • -
    -
  • -
-

所以要检查当前业务主键是否已经在空补偿记录下来的业务主键中存在,如果存在则要拒绝服务的执行

-
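与上面的空补偿记录配合,原服务在执行前可以同时做防悬挂和幂等检查,示意如下(仍为假设的示例代码):

public boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params) {
    // 防悬挂:该业务主键已被记录为"空补偿",说明补偿请求已先到达,拒绝执行
    if (compensationLogDAO.existsEmptyCompensation(businessKey)) {
        return false;
    }
    // 幂等:同一业务主键重复到达时直接返回上次的处理结果,避免重复扣减
    if (inventoryDAO.findFreezeByBusinessKey(businessKey) != null) {
        return true;
    }
    return inventoryDAO.freeze(businessKey, amount);
}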

幂等控制

-
    -
  • 原服务与补偿服务都需要保证幂等性, 由于网络可能超时, 可以设置重试策略,重试发生时要通过幂等控制避免业务数据重复更新
  • -
-

缺乏隔离性的应对

-
    -
  • 由于 Saga 事务不保证隔离性, 在极端情况下可能由于脏写无法完成回滚操作, 比如举一个极端的例子, 分布式事务内先给用户A充值, 然后给用户B扣减余额, 如果在给A用户充值成功, 在事务提交以前, A用户把余额消费掉了, 如果事务发生回滚, 这时则没有办法进行补偿了。这就是缺乏隔离性造成的典型的问题, 实践中一般的应对方法是: -
      -
    • 业务流程设计时遵循“宁可长款, 不可短款”的原则, 长款意思是客户少了钱机构多了钱, 以机构信誉可以给客户退款, 反之则是短款, 少的钱可能追不回来了。所以在业务流程设计上一定是先扣款。
    • -
    • 有些业务场景可以允许让业务最终成功, 在回滚不了的情况下可以继续重试完成后面的流程, 所以状态机引擎除了提供“回滚”能力还需要提供“向前”恢复上下文继续执行的能力, 让业务最终执行成功, 达到最终一致性的目的。
    • -
    -
  • -
-

API reference

-

StateMachineEngine API

-
public interface StateMachineEngine {
-
-    /**
-     * start a state machine instance
-     * @param stateMachineName
-     * @param tenantId
-     * @param startParams
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance start(String stateMachineName, String tenantId, Map<String, Object> startParams) throws EngineExecutionException;
-
-    /**
-     * start a state machine instance with businessKey
-     * @param stateMachineName
-     * @param tenantId
-     * @param businessKey
-     * @param startParams
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance startWithBusinessKey(String stateMachineName, String tenantId, String businessKey, Map<String, Object> startParams) throws EngineExecutionException;
-
-    /**
-     * start a state machine instance asynchronously
-     * @param stateMachineName
-     * @param tenantId
-     * @param startParams
-     * @param callback
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance startAsync(String stateMachineName, String tenantId, Map<String, Object> startParams, AsyncCallback callback) throws EngineExecutionException;
-
-    /**
-     * start a state machine instance asynchronously with businessKey
-     * @param stateMachineName
-     * @param tenantId
-     * @param businessKey
-     * @param startParams
-     * @param callback
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance startWithBusinessKeyAsync(String stateMachineName, String tenantId, String businessKey, Map<String, Object> startParams, AsyncCallback callback) throws EngineExecutionException;
-
-    /**
-     * forward restart a failed state machine instance
-     * @param stateMachineInstId
-     * @param replaceParams
-     * @return
-     * @throws ForwardInvalidException
-     */
-    StateMachineInstance forward(String stateMachineInstId, Map<String, Object> replaceParams) throws ForwardInvalidException;
-
-    /**
-     * forward restart a failed state machine instance asynchronously
-     * @param stateMachineInstId
-     * @param replaceParams
-     * @param callback
-     * @return
-     * @throws ForwardInvalidException
-     */
-    StateMachineInstance forwardAsync(String stateMachineInstId, Map<String, Object> replaceParams, AsyncCallback callback) throws ForwardInvalidException;
-
-    /**
-     * compensate a state machine instance
-     * @param stateMachineInstId
-     * @param replaceParams
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance compensate(String stateMachineInstId, Map<String, Object> replaceParams) throws EngineExecutionException;
-
-    /**
-     * compensate a state machine instance asynchronously
-     * @param stateMachineInstId
-     * @param replaceParams
-     * @param callback
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance compensateAsync(String stateMachineInstId, Map<String, Object> replaceParams, AsyncCallback callback) throws EngineExecutionException;
-
-    /**
-     * skip current failed state instance and forward restart state machine instance
-     * @param stateMachineInstId
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance skipAndForward(String stateMachineInstId) throws EngineExecutionException;
-
-    /**
-     * skip current failed state instance and forward restart state machine instance asynchronously
-     * @param stateMachineInstId
-     * @param callback
-     * @return
-     * @throws EngineExecutionException
-     */
-    StateMachineInstance skipAndForwardAsync(String stateMachineInstId, AsyncCallback callback) throws EngineExecutionException;
-
-    /**
-     * get state machine configurations
-     * @return
-     */
-    StateMachineConfig getStateMachineConfig();
-}
-
-
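结合上面的接口,下面给出一个启动状态机并检查执行结果的简单示意(startParams 的 key 与前文状态机定义的 Input 对应;ExecutionStatus、getStatus() 等以 seata-saga 实际代码为准,这里仅说明调用方式):

String businessKey = String.valueOf(System.currentTimeMillis());

Map<String, Object> startParams = new HashMap<>();
startParams.put("businessKey", businessKey);
startParams.put("count", 10);
startParams.put("amount", new BigDecimal("100"));

// 同步启动:方法返回时状态机已执行结束(成功、失败或等待恢复)
StateMachineInstance inst = stateMachineEngine.startWithBusinessKey(
        "reduceInventoryAndBalance", null, businessKey, startParams);

// 按实例状态判断本次全局事务结果:SU 成功、FA 失败、UN 未知
if (ExecutionStatus.SU.equals(inst.getStatus())) {
    // 事务提交成功
}

// 需要异步执行时可改用 startAsync / startWithBusinessKeyAsync,并在 AsyncCallback 中处理结果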

StateMachine Execution Instance API:

-
StateLogRepository stateLogRepository = stateMachineEngine.getStateMachineConfig().getStateLogRepository();
-StateMachineInstance stateMachineInstance = stateLogRepository.getStateMachineInstanceByBusinessKey(businessKey, tenantId);
-
-/**
- * State Log Repository
- *
- * @author lorne.cl
- */
-public interface StateLogRepository {
-
-    /**
-     * Get state machine instance
-     *
-     * @param stateMachineInstanceId
-     * @return
-     */
-    StateMachineInstance getStateMachineInstance(String stateMachineInstanceId);
-
-    /**
-     * Get state machine instance by businessKey
-     *
-     * @param businessKey
-     * @param tenantId
-     * @return
-     */
-    StateMachineInstance getStateMachineInstanceByBusinessKey(String businessKey, String tenantId);
-
-    /**
-     * Query the list of state machine instances by parent id
-     *
-     * @param parentId
-     * @return
-     */
-    List<StateMachineInstance> queryStateMachineInstanceByParentId(String parentId);
-
-    /**
-     * Get state instance
-     *
-     * @param stateInstanceId
-     * @param machineInstId
-     * @return
-     */
-    StateInstance getStateInstance(String stateInstanceId, String machineInstId);
-
-    /**
-     * Get a list of state instances by state machine instance id
-     *
-     * @param stateMachineInstanceId
-     * @return
-     */
-    List<StateInstance> queryStateInstanceListByMachineInstanceId(String stateMachineInstanceId);
-}
-
-
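利用该 API 可以找到执行失败的状态机实例,在修复问题后做"向前"恢复,示意如下(getId()、getStatus() 等取值方法以 seata-saga 实际代码为准):

StateMachineInstance failedInst =
        stateLogRepository.getStateMachineInstanceByBusinessKey(businessKey, tenantId);

// 对未成功(FA/UN)的实例从失败节点向前重试;
// 也可以用 skipAndForward 跳过当前失败节点继续往后执行
if (failedInst != null && !ExecutionStatus.SU.equals(failedInst.getStatus())) {
    stateMachineEngine.forward(failedInst.getId(), null);
}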

StateMachine Definition API:

-
StateMachineRepository stateMachineRepository = stateMachineEngine.getStateMachineConfig().getStateMachineRepository();
-StateMachine stateMachine = stateMachineRepository.getStateMachine(stateMachineName, tenantId);
-
-/**
- * StateMachineRepository
- *
- * @author lorne.cl
- */
-public interface StateMachineRepository {
-
-    /**
-     * Gets get state machine by id.
-     *
-     * @param stateMachineId the state machine id
-     * @return the get state machine by id
-     */
-    StateMachine getStateMachineById(String stateMachineId);
-
-    /**
-     * Gets get state machine.
-     *
-     * @param stateMachineName the state machine name
-     * @param tenantId         the tenant id
-     * @return the get state machine
-     */
-    StateMachine getStateMachine(String stateMachineName, String tenantId);
-
-    /**
-     * Gets get state machine.
-     *
-     * @param stateMachineName the state machine name
-     * @param tenantId         the tenant id
-     * @param version          the version
-     * @return the get state machine
-     */
-    StateMachine getStateMachine(String stateMachineName, String tenantId, String version);
-
-    /**
-     * Register the state machine to the repository (if the same version already exists, return the existing version)
-     *
-     * @param stateMachine
-     */
-    StateMachine registryStateMachine(StateMachine stateMachine);
-
-    /**
-     * registry by resources
-     *
-     * @param resources
-     * @param tenantId
-     */
-    void registryByResources(Resource[] resources, String tenantId) throws IOException;
-}
-
-

Config reference

-

在Spring Bean配置文件中配置一个StateMachineEngine

-
<bean id="dataSource" class="...">
-...
-<bean>
-<bean id="stateMachineEngine" class="io.seata.saga.engine.impl.ProcessCtrlStateMachineEngine">
-        <property name="stateMachineConfig" ref="dbStateMachineConfig"></property>
-</bean>
-<bean id="dbStateMachineConfig" class="io.seata.saga.engine.config.DbStateMachineConfig">
-    <property name="dataSource" ref="dataSource"></property>
-    <property name="resources" value="statelang/*.json"></property>
-    <property name="enableAsync" value="true"></property>
-    <property name="threadPoolExecutor" ref="threadExecutor"></property><!-- 事件驱动执行时使用的线程池, 如果所有状态机都同步执行可以不需要 -->
-    <property name="applicationId" value="saga_sample"></property>
-    <property name="txServiceGroup" value="my_test_tx_group"></property>
-</bean>
-<bean id="threadExecutor"
-        class="org.springframework.scheduling.concurrent.ThreadPoolExecutorFactoryBean">
-    <property name="threadNamePrefix" value="SAGA_ASYNC_EXE_" />
-    <property name="corePoolSize" value="1" />
-    <property name="maxPoolSize" value="20" />
-</bean>
-
-<!-- Seata Server进行事务恢复时需要通过这个Holder拿到stateMachineEngine实例 -->
-<bean class="io.seata.saga.rm.StateMachineEngineHolder">
-    <property name="stateMachineEngine" ref="stateMachineEngine"/>
-</bean>
-
-

State language reference

-

"状态机"的属性列表

-
{
-    "Name": "reduceInventoryAndBalance",
-    "Comment": "reduce inventory then reduce balance in a transaction",
-    "StartState": "ReduceInventory",
-    "Version": "0.0.1",
-    "States": {
-    }
-}
-
-
    -
  • Name: 表示状态机的名称,必须唯一
  • -
  • Comment: 状态机的描述
  • -
  • Version: 状态机定义版本
  • -
  • StartState: 启动时运行的第一个"状态"
  • -
  • States: 状态列表,是一个map结构,key是"状态"的名称,在状态机内必须唯一, value是一个map结构表示"状态"的属性列表
  • -
-

各种"状态"的属性列表

-

ServiceTask:

-
"States": {
-    ...
-    "ReduceBalance": {
-        "Type": "ServiceTask",
-        "ServiceName": "balanceAction",
-        "ServiceMethod": "reduce",
-        "CompensateState": "CompensateReduceBalance",
-        "IsForUpdate": true,
-        "IsPersist": true,
-        "IsAsync": false,
-        "Input": [
-            "$.[businessKey]",
-            "$.[amount]",
-            {
-                "throwException" : "$.[mockReduceBalanceFail]"
-            }
-        ],
-        "Output": {
-            "compensateReduceBalanceResult": "$.#root"
-        },
-        "Status": {
-            "#root == true": "SU",
-            "#root == false": "FA",
-            "$Exception{java.lang.Throwable}": "UN"
-        },
-        "Retry": [
-            {
-                "Exceptions": ["io.seata.saga.engine.mock.DemoException"],
-                "IntervalSeconds": 1.5,
-                "MaxAttempts": 3,
-                "BackoffRate": 1.5
-            },
-            {
-                "IntervalSeconds": 1,
-                "MaxAttempts": 3,
-                "BackoffRate": 1.5
-            }
-        ],
-        "Catch": [
-            {
-                "Exceptions": [
-                    "java.lang.Throwable"
-                ],
-                "Next": "CompensationTrigger"
-            }
-        ],
-        "Next": "Succeed"
-    }
-    ...
-}
-
-
    -
  • ServiceName: 服务名称,通常是服务的beanId
  • -
  • ServiceMethod: 服务方法名称
  • -
  • CompensateState: 该"状态"的补偿"状态"
  • -
  • IsForUpdate: 标识该服务会更新数据, 默认是false, 如果配置了CompensateState则默认是true, 有补偿服务的服务肯定是数据更新类服务
  • -
  • IsPersist: 执行日志是否进行存储, 默认是true, 有一些查询类的服务可以配置为false, 执行日志不进行存储提高性能, 因为当异常恢复时可以重复执行
  • -
  • IsAsync: 异步调用服务, 注意: 因为异步调用服务会忽略服务的返回结果, 所以用户定义的服务执行状态映射(下面的Status属性)将被忽略, 默认为服务调用成功, 如果提交异步调用就失败(比如线程池已满)则为服务执行状态为失败
  • -
  • Input: 调用服务的输入参数列表, 是一个数组, 对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达式使用的是SpringEL, 如果是常量直接写值即可。复杂的参数如何传入见:复杂参数的Input定义
  • -
  • Output: 将服务返回的参数赋值到状态机上下文中, 是一个map结构,key为放入到状态机上下文时的key(状态机上下文也是一个map),value中$.是表示SpringEL表达式,表示从服务的返回参数中取值,#root表示服务的整个返回参数
  • -
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知, 我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个map结构,key是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是SpringEL表达式判断服务返回参数,带$Exception{开头表示判断异常类型。value是当这个条件表达式成立时则将服务执行状态映射成这个值
  • -
  • Catch: 捕获到异常后的路由
  • -
  • Retry: 捕获异常后的重试策略, 是个数组, 可以配置多个规则, Exceptions 为匹配的异常列表, IntervalSeconds 为重试间隔, MaxAttempts 为最大重试次数, BackoffRate 为下一次重试间隔相对于上一次重试间隔的倍数,比如说上一次重试间隔是2秒, BackoffRate=1.5 则下一次重试间隔是3秒。Exceptions 属性可以不配置, 不配置时表示框架自动匹配网络超时异常。当在重试过程中发生了别的异常,框架会重新匹配规则,并按新规则进行重试,同一种规则的总重试次数不会超过该规则的MaxAttempts
  • -
  • Next: 服务执行完成后下一个执行的"状态"
  • -
-
-

当没有配置Status对服务执行状态进行映射, 系统会自动判断状态:

-
    -
  • 没有异常则认为执行成功,
  • -
  • 如果有异常, 则判断异常是不是网络连接超时, 如果是则认为是FA
  • -
  • 如果是其它异常, 服务IsForUpdate=true则状态为UN, 否则为FA
  • -
-
-
-

整个状态机的执行状态如何判断?是由框架自己判断的, 状态机有两个状态: status(正向执行状态), compensateStatus(补偿状态):

-
    -
  • 如果所有服务执行成功(事务提交成功)则status=SU, compensateStatus=null
  • -
  • 如果有服务执行失败且存在更新类服务执行成功且没有进行补偿(事务提交失败) 则status=UN, compensateStatus=null
  • -
  • 如果有服务执行失败且不存在更新类服务执行成功且没有进行补偿(事务提交失败) 则status=FA, compensateStatus=null
  • -
  • 如果补偿成功(事务回滚成功)则status=FA/UN, compensateStatus=SU
  • -
  • 发生补偿且有未补偿成功的服务(回滚失败)则status=FA/UN, compensateStatus=UN
  • -
  • 存在事务提交或回滚失败的情况Seata Sever都会不断发起重试
  • -
-
-

Choice:

-
"ChoiceState":{
-    "Type": "Choice",
-    "Choices":[
-        {
-            "Expression":"[reduceInventoryResult] == true",
-            "Next":"ReduceBalance"
-        }
-    ],
-    "Default":"Fail"
-}
-
-

Choice类型的"状态"是单项选择路由 -Choices: 可选的分支列表, 只会选择第一个条件成立的分支 -Expression: SpringEL表达式 -Next: 当Expression表达式成立时执行的下一个"状态"

-

Succeed:

-
"Succeed": {
-    "Type":"Succeed"
-}
-
-

运行到"Succeed状态"表示状态机正常结束, 正常结束不代表成功结束, 是否成功要看每个"状态"是否都成功

-

Fail:

-
"Fail": {
-    "Type":"Fail",
-    "ErrorCode": "PURCHASE_FAILED",
-    "Message": "purchase failed"
-}
-
-

运行到"Fail状态"状态机异常结束, 异常结束时可以配置ErrorCode和Message, 表示错误码和错误信息, 可以用于给调用方返回错误码和消息

-

CompensationTrigger:

-
"CompensationTrigger": {
-    "Type": "CompensationTrigger",
-    "Next": "Fail"
-}
-
-

CompensationTrigger类型的state是用于触发补偿事件, 回滚分布式事务 -Next: 补偿成功后路由到的state

-

SubStateMachine:

-
"CallSubStateMachine": {
-    "Type": "SubStateMachine",
-    "StateMachineName": "simpleCompensationStateMachine",
-    "CompensateState": "CompensateSubMachine",
-    "Input": [
-        {
-            "a": "$.1",
-            "barThrowException": "$.[barThrowException]",
-            "fooThrowException": "$.[fooThrowException]",
-            "compensateFooThrowException": "$.[compensateFooThrowException]"
-        }
-    ],
-    "Output": {
-        "fooResult": "$.#root"
-    },
-    "Next": "Succeed"
-}
-
-

SubStateMachine类型的"状态"是调用子状态机 -StateMachineName: 要调用的子状态机名称 -CompensateState: 子状态机的补偿state, 可以不配置, 系统会自动创建它的补偿state, 子状态机的补偿实际就是调用子状态机的compensate方法, 所以用户并不需要自己实现一个对子状态机的补偿服务。当配置这个属性时, 可以利用Input属性自定义传入一些变量, 见下面的CompensateSubMachine

-

CompensateSubMachine:

-
"CompensateSubMachine": {
-    "Type": "CompensateSubMachine",
-    "Input": [
-        {
-            "compensateFooThrowException": "$.[compensateFooThrowException]"
-        }
-    ]
-}
-
-

CompensateSubMachine类型的state是专门用于补偿一个子状态机的state,它会调用子状态机的compensate方法,可以利用Input属性传入一些自定义的变量, 并可通过Status属性自定义判断补偿是否成功

-

复杂参数的Input定义

-
"FirstState": {
-    "Type": "ServiceTask",
-    "ServiceName": "demoService",
-    "ServiceMethod": "complexParameterMethod",
-    "Next": "ChoiceState",
-    "ParameterTypes" : ["java.lang.String", "int", "io.seata.saga.engine.mock.DemoService$People", "[Lio.seata.saga.engine.mock.DemoService$People;", "java.util.List", "java.util.Map"],
-    "Input": [
-        "$.[people].name",
-        "$.[people].age",
-        {
-            "name": "$.[people].name",
-            "age": "$.[people].age",
-            "childrenArray": [
-                {
-                    "name": "$.[people].name",
-                    "age": "$.[people].age"
-                },
-                {
-                    "name": "$.[people].name",
-                    "age": "$.[people].age"
-                }
-            ],
-            "childrenList": [
-                {
-                    "name": "$.[people].name",
-                    "age": "$.[people].age"
-                },
-                {
-                    "name": "$.[people].name",
-                    "age": "$.[people].age"
-                }
-            ],
-            "childrenMap": {
-                "lilei": {
-                    "name": "$.[people].name",
-                    "age": "$.[people].age"
-                }
-            }
-        },
-        [
-            {
-                "name": "$.[people].name",
-                "age": "$.[people].age"
-            },
-            {
-                "name": "$.[people].name",
-                "age": "$.[people].age"
-            }
-        ],
-        [
-            {
-                "@type": "io.seata.saga.engine.mock.DemoService$People",
-                "name": "$.[people].name",
-                "age": "$.[people].age"
-            }
-        ],
-        {
-            "lilei": {
-                "@type": "io.seata.saga.engine.mock.DemoService$People",
-                "name": "$.[people].name",
-                "age": "$.[people].age"
-            }
-        }
-    ],
-    "Output": {
-        "complexParameterMethodResult": "$.#root"
-    }
-}
-
-

上面的complexParameterMethod方法定义如下:

-
People complexParameterMethod(String name, int age, People people, People[] peopleArray, List<People> peopleList, Map<String, People> peopleMap)
-
-class People {
-
-    private String name;
-    private int    age;
-
-    private People[] childrenArray;
-    private List<People> childrenList;
-    private Map<String, People> childrenMap;
-
-    ...
-}
-
-

启动状态机时传入参数:

-
Map<String, Object> paramMap = new HashMap<>(1);
-People people = new People();
-people.setName("lilei");
-people.setAge(18);
-paramMap.put("people", people);
-String stateMachineName = "simpleStateMachineWithComplexParams";
-StateMachineInstance inst = stateMachineEngine.start(stateMachineName, null, paramMap);
-
-
-

注意: ParameterTypes属性通常可以不传; 当被调用方法的参数列表中有Map, List这种可以带泛型的集合类型时, 因为java编译会丢失泛型信息, 所以需要用这个属性, 同时在Input中对应的json上加"@type"来声明泛型(即集合的元素类型)

-
-

FAQ

-
-

问: saga服务流程可以不配置吗,使用全局事务id串起来,这样省去配置的工作量,再加上人工配置难免会配置错误?

-

答: saga一般有两种实现,一种是基于状态机定义,比如apache camel saga、eventuate,一种是基于注解+拦截器实现,比如serviceComb saga,后者是不需要配置状态图的。由于 Saga 事务不保证隔离性, 在极端情况下可能由于脏写无法完成回滚操作, 比如举一个极端的例子, 分布式事务内先给用户A充值, 然后给用户B扣减余额, 如果在给A用户充值成功, 在事务提交以前, A用户把余额消费掉了, 如果事务发生回滚, 这时则没有办法进行补偿了,有些业务场景可以允许让业务最终成功, 在回滚不了的情况下可以继续重试完成后面的流程, 基于状态机引擎除可以提供“回滚”能力外, 还可以提供“向前”恢复上下文继续执行的能力, 让业务最终执行成功, 达到最终一致性的目的,所以在实际生产中基于状态机的实现应用更多。后续也会提供基于注解+拦截器实现。

-
-

问: 比如有 服务A在系统1里面,服务B在系统2里面。全局事务由A开启,流程调用B开启子事务,那系统2也需要维护Saga状态机的三个表吗,也需要在Spring Bean配置文件中配置一个StateMachineEngine吗?

-

答: 不需要, 日志只在发起方记录。由于只在发起方记录日志, 同时对参与者服务没有接口参数的要求, 使得Saga可以方便地集成其它机构或遗留系统的服务

-
-

问: 如果 系统1和系统2里面的服务可以相互调用,系统1和2都可以开启全局事务,可以这样使用吗?那1和2是不是都需要维护Saga状态机的三个表,也都需要在Spring Bean配置文件中配置一个StateMachineEngine?

-

答: 可以这样使用,如果两个系统都开启saga事务,那两边就都要维护那三个表并配置StateMachineEngine

-
-

问: 使用 Seata 的时候,现在是 AT 模式 如果改成 saga 模式的话,改造会大吗?

-

答: AT 模式完全是透明的,Saga 是有侵入性的,要配置状态机 json,如果服务多改造会比较大

-
-

问: Saga 模式是不是基于 AT 来加强的长事务处理呢?

-

答: 没有基于 AT,客户端完全是两套,Server 端是复用的。你也可以看 Saga 的单元测试,那里有很多示例:https://github.com/seata/seata/tree/develop/test/src/test/java/io/seata/saga/engine

-
-

问: 开发者文档中状态机引擎原理图里的EventQueue只是在开启分布式事务的系统内进行事件驱动,调用其它系统服务像调用本地一样。系统之间还是RPC调用是吧,而不是系统之间也是纯事件驱动的?("系统之间也是纯事件驱动的" 指 RPC 也是非阻塞的)

-

答: 节点与节点间是事件驱动的, RPC 的非阻塞需要 rpc client 支持,理论上也是可以的。rpc client 如果也是非阻塞 IO,那么所有环节都是异步了。

-
-

问: 考虑一个业务流程, 它后续的子流程, 不管谁先运行都不会相互影响,可以异步调用。子流程是其它系统服务。Seata Saga 是不是实现了这点,其实我没看明白 ,Seata Saga异步调用具体是不是各个节点异步了?

-

答: Saga的异步启动一个状态机(stateMachineEngine.startAsync)是指状态机内所有的状态都是事件驱动来执行的, 整个流程实际是同步的, 上一个状态结束才产生下一个状态的事件. 而异步调用一个服务是配置该ServiceTask为"IsAsync":true, 这个服务将会异步调用, 不会阻塞状态机的推进, 状态机不关心它的执行结果.

-
-

问: Saga源码中事件驱动层同步bus和异步bus是什么作用?

-

答: 同步BUS是线程阻塞的,等整个状态机执行完毕才返回,异步BUS是非线程阻塞的,调用后立即返回,状态机执行完毕后回调你的Callback。

-
-

问: IsPersist: 执行日志是否进行存储,默认是true,有一些查询类的服务可以配置为false,执行日志不进行存储以提高性能,因为当异常恢复时可以重复执行?

-

答: 是的可以配置成false, 不过建议先保持默认,这样在查询执行日志比较全,真的要做性能调优再配,一般不会有性能问题

-
-

问: seata saga 开启事务的客户端或者seata server服务端宕机或者重启,未完成的状态机实例是怎么保证继续执行下去的?谁去触发这个操作?

-

答: 状态机实例在本地数据库记录有日志,通过日志恢复。seata server 会触发事务恢复。

-
-

问: saga 的json文件 支持热部署吗?

-

答: 支持, stateMachineEngine.getStateMachineConfig().getStateMachineRepository().registryByResources()。不过java代码和服务需要自己实现支持热部署

-
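一个热加载新状态机定义的示意如下(通过 Spring 的 PathMatchingResourcePatternResolver 读取 json 资源,registryByResources 的签名见上文 StateMachineRepository 接口;tenantId 需与启动状态机时使用的租户一致):

ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
Resource[] resources = resolver.getResources("classpath*:statelang/*.json");

// 将匹配到的状态机定义注册(或更新)到仓库,之后即可按新定义启动状态机
stateMachineEngine.getStateMachineConfig()
        .getStateMachineRepository()
        .registryByResources(resources, tenantId);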
-

问: 出参入参都放在saga的上下文中,如果参数内容较多较大,业务量又大的话,对内存有限制吗?

-

答: 没有做限制,建议无关的参数不要放到上下文。下一个服务需要用的参数、或用于分支判断的参数可以放入上下文。

-
-

问: 确认个事情:每个节点,要么在自己方法内部 Catch 异常处理,使最终有返回信息;要么自己内部可以不处理,交由状态机引擎捕获异常,在 json 中定义 Catch 属性。补偿节点并不会自动触发补偿,需要补偿时必须在 json 中由 Catch 或者 Choices 属性手动路由到 CompensationTrigger?

-

答: 对的,这个是为了提高灵活性。用户可以自己控制是否进行回滚,因为并不是所有异常都要回滚,可能有一些自定义处理手段。

-
-

问: 所以 Catch 和 Choices 可以随便路由到想要的 state 对吧?

-

答: 是的。这种自定义触发补偿的设计是参考了 bpmn2.0 的。

-
-

问: 还有关于 json 文件,我打算一条流程,就定义一个 json,虽然有的流程很像,用 Choices,可以解决。但是感觉 json 还是要尽量简单。这样考虑对吗?

-

答: 你可以考虑用子状态机来复用,子状态机会多生成一行 stateMachineInstance 记录,但对性能影响应该不大。

-
-
- - - - - - - diff --git a/zh-cn/docs/user/saga.json b/zh-cn/docs/user/saga.json deleted file mode 100644 index e385dd35..00000000 --- a/zh-cn/docs/user/saga.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "saga.md", - "__html": "

SEATA Saga 模式

\n

概述

\n

Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。

\n

\"Saga模式示意图\"

\n

理论基础:Hector & Kenneth 发表论文 Sagas (1987)

\n

适用场景:

\n
    \n
  • 业务流程长、业务流程多
  • \n
  • 参与者包含其它公司或遗留系统服务,无法提供 TCC 模式要求的三个接口
  • \n
\n

优势:

\n
    \n
  • 一阶段提交本地事务,无锁,高性能
  • \n
  • 事件驱动架构,参与者可异步执行,高吞吐
  • \n
  • 补偿服务易于实现
  • \n
\n

缺点:

\n
    \n
  • 不保证隔离性(应对方案见后面文档)
  • \n
\n

Saga的实现:

\n

基于状态机引擎的 Saga 实现:

\n

目前SEATA提供的Saga模式是基于状态机引擎来实现的,机制是:

\n
    \n
  1. 通过状态图来定义服务调用的流程并生成 json 状态语言定义文件
  2. \n
  3. 状态图中一个节点可以是调用一个服务,节点可以配置它的补偿节点
  4. \n
  5. 状态图 json 由状态机引擎驱动执行,当出现异常时状态引擎反向执行已成功节点对应的补偿节点将事务回滚
  6. \n
\n
\n

注意: 异常发生时是否进行补偿也可由用户自定义决定

\n
\n
    \n
  1. 可以实现服务编排需求,支持单项选择、并发、子流程、参数转换、参数映射、服务执行状态判断、异常捕获等功能
  2. \n
\n

示例状态图:

\n

\"示例状态图\"

\n

快速开始

\n

Demo简介

\n

基于dubbo构建的微服务下,使用Saga模式演示分布式事务的提交和回滚;

\n

业务流程图如下图所示:

\n

\"demo业务流程图\"

\n

先下载seata-samples工程:https://github.com/seata/seata-samples.git

\n
\n

注意SEATA版本需要0.9.0以上

\n
\n

在dubbo-saga-sample中一个分布式事务内会有2个Saga事务参与者,分别是: InventoryActionBalanceAction ;分布式事务提交则两者均提交,分布式事务回滚则两者均回滚;

\n

这2个Saga参与者均是 dubbo 服务,两个参与都有一个reduce方法,表示库存扣减或余额扣减,还有一个compensateReduce方法,表示补偿扣减操作。

\n
    \n
  • InventoryAction 接口定义如下:
  • \n
\n
public interface InventoryAction {\n\n    /**\n     * reduce\n     * @param businessKey\n     * @param amount\n     * @param params\n     * @return\n     */\n    boolean reduce(String businessKey, BigDecimal amount, Map<String, Object> params);\n\n    /**\n     * compensateReduce\n     * @param businessKey\n     * @param params\n     * @return\n     */\n    boolean compensateReduce(String businessKey, Map<String, Object> params);\n}\n
\n
    \n
  • 这个场景用状态语言定义就是下面的json:src/main/resources/statelang/reduce_inventory_and_balance.json
  • \n
\n
{\n    \"Name\": \"reduceInventoryAndBalance\",\n    \"Comment\": \"reduce inventory then reduce balance in a transaction\",\n    \"StartState\": \"ReduceInventory\",\n    \"Version\": \"0.0.1\",\n    \"States\": {\n        \"ReduceInventory\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"inventoryAction\",\n            \"ServiceMethod\": \"reduce\",\n            \"CompensateState\": \"CompensateReduceInventory\",\n            \"Next\": \"ChoiceState\",\n            \"Input\": [\n                \"$.[businessKey]\",\n                \"$.[count]\"\n            ],\n            \"Output\": {\n                \"reduceInventoryResult\": \"$.#root\"\n            },\n            \"Status\": {\n                \"#root == true\": \"SU\",\n                \"#root == false\": \"FA\",\n                \"$Exception{java.lang.Throwable}\": \"UN\"\n            }\n        },\n        \"ChoiceState\":{\n            \"Type\": \"Choice\",\n            \"Choices\":[\n                {\n                    \"Expression\":\"[reduceInventoryResult] == true\",\n                    \"Next\":\"ReduceBalance\"\n                }\n            ],\n            \"Default\":\"Fail\"\n        },\n        \"ReduceBalance\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"balanceAction\",\n            \"ServiceMethod\": \"reduce\",\n            \"CompensateState\": \"CompensateReduceBalance\",\n            \"Input\": [\n                \"$.[businessKey]\",\n                \"$.[amount]\",\n                {\n                    \"throwException\" : \"$.[mockReduceBalanceFail]\"\n                }\n            ],\n            \"Output\": {\n                \"compensateReduceBalanceResult\": \"$.#root\"\n            },\n            \"Status\": {\n                \"#root == true\": \"SU\",\n                \"#root == false\": \"FA\",\n                \"$Exception{java.lang.Throwable}\": \"UN\"\n            },\n            \"Catch\": [\n                {\n                    \"Exceptions\": [\n                        \"java.lang.Throwable\"\n                    ],\n                    \"Next\": \"CompensationTrigger\"\n                }\n            ],\n            \"Next\": \"Succeed\"\n        },\n        \"CompensateReduceInventory\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"inventoryAction\",\n            \"ServiceMethod\": \"compensateReduce\",\n            \"Input\": [\n                \"$.[businessKey]\"\n            ]\n        },\n        \"CompensateReduceBalance\": {\n            \"Type\": \"ServiceTask\",\n            \"ServiceName\": \"balanceAction\",\n            \"ServiceMethod\": \"compensateReduce\",\n            \"Input\": [\n                \"$.[businessKey]\"\n            ]\n        },\n        \"CompensationTrigger\": {\n            \"Type\": \"CompensationTrigger\",\n            \"Next\": \"Fail\"\n        },\n        \"Succeed\": {\n            \"Type\":\"Succeed\"\n        },\n        \"Fail\": {\n            \"Type\":\"Fail\",\n            \"ErrorCode\": \"PURCHASE_FAILED\",\n            \"Message\": \"purchase failed\"\n        }\n    }\n}\n
\n

该json表示的状态图:

\n

\"该json表示的状态图\"

\n

状态语言在一定程度上参考了AWS Step Functions

\n

"状态机" 属性简介:

\n
    \n
  • Name: 表示状态机的名称,必须唯一
  • \n
  • Comment: 状态机的描述
  • \n
  • Version: 状态机定义版本
  • \n
  • StartState: 启动时运行的第一个"状态"
  • \n
  • States: 状态列表,是一个map结构,key是"状态"的名称,在状态机内必须唯一
  • \n
\n

"状态" 属性简介:

\n
    \n
  • Type: "状态" 的类型,比如有:\n
      \n
    • ServiceTask: 执行调用服务任务
    • \n
    • Choice: 单条件选择路由
    • \n
    • CompensationTrigger: 触发补偿流程
    • \n
    • Succeed: 状态机正常结束
    • \n
    • Fail: 状态机异常结束
    • \n
    • SubStateMachine: 调用子状态机
    • \n
    • CompensateSubMachine: 用于补偿一个子状态机
    • \n
    \n
  • \n
  • ServiceName: 服务名称,通常是服务的beanId
  • \n
  • ServiceMethod: 服务方法名称
  • \n
  • CompensateState: 该"状态"的补偿"状态"
  • \n
  • Input: 调用服务的输入参数列表, 是一个数组, 对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达使用的SpringEL, 如果是常量直接写值即可
  • \n
  • Output: 将服务返回的参数赋值到状态机上下文中, 是一个map结构,key为放入到状态机上下文时的key(状态机上下文也是一个map),value中$.是表示SpringEL表达式,表示从服务的返回参数中取值,#root表示服务的整个返回参数
  • \n
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知, 我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个map结构,key是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是SpringEL表达式判断服务返回参数,带$Exception{开头表示判断异常类型。value是当这个条件表达式成立时则将服务执行状态映射成这个值
  • \n
  • Catch: 捕获到异常后的路由
  • \n
  • Next: 服务执行完成后下一个执行的"状态"
  • \n
  • Choices: Choice类型的"状态"里, 可选的分支列表, 分支中的Expression为SpringEL表达式, Next为当表达式成立时执行的下一个"状态"
  • \n
  • ErrorCode: Fail类型"状态"的错误码
  • \n
  • Message: Fail类型"状态"的错误信息
  • \n
\n

更多详细的状态语言解释请看State language reference章节

\n

更多详细的状态语言使用示例见https://github.com/seata/seata/tree/develop/test/src/test/java/io/seata/saga/engine

\n

Demo 运行指南

\n

step 1 启动 SEATA Server

\n

运行 SeataServerStarter ,启动 Seata Server;

\n

step 2 启动 dubbo provider Demo

\n

运行 DubboSagaProviderStarter ,启动 dubbo provider;

\n

step 3 启动 Saga Demo

\n

运行 DubboSagaTransactionStarter , 启动 demo工程;

\n
\n

Demo中的数据库使用的是H2内存数据库, 生产上建议使用与业务相同的库, 目前支持Oracle, Mysql, DB2. 建表语句在 https://github.com/seata/seata/tree/develop/saga/seata-saga-engine-store/src/main/resources/sql

\n
\n
\n

Demo中还有调用本地服务和调用SOFA RPC服务的示例

\n
\n

状态机设计器

\n

Seata Saga 提供了一个可视化的状态机设计器方便用户使用,代码和运行指南请参考:\nhttps://github.com/seata/seata/tree/develop/saga/seata-saga-statemachine-designer

\n

状态机设计器截图:\n\"状态机设计器\"

\n

状态机设计器演示地址:http://seata.io/saga_designer/index.html

\n

最佳实践

\n

Saga 服务设计的实践经验

\n

允许空补偿

\n
    \n
  • 空补偿:原服务未执行,补偿服务执行了
  • \n
  • 出现原因:\n
      \n
    • 原服务 超时(丢包)
    • \n
    • Saga 事务触发 回滚
    • \n
    • 未收到 原服务请求,先收到 补偿请求
    • \n
    \n
  • \n
\n

所以服务设计时需要允许空补偿, 即没有找到要补偿的业务主键时返回补偿成功并将原业务主键记录下来

\n

防悬挂控制

\n
    \n
  • 悬挂:补偿服务 比 原服务 先执行
  • \n
  • 出现原因:\n
      \n
    • 原服务 超时(拥堵)
    • \n
    • Saga 事务回滚,触发 回滚
    • \n
    • 拥堵的 原服务 到达
    • \n
    \n
  • \n
\n

所以要检查当前业务主键是否已经在空补偿记录下来的业务主键中存在,如果存在则要拒绝服务的执行

\n

幂等控制

\n
    \n
  • 原服务与补偿服务都需要保证幂等性, 由于网络可能超时, 可以设置重试策略,重试发生时要通过幂等控制避免业务数据重复更新
  • \n
\n

缺乏隔离性的应对

\n
    \n
  • 由于 Saga 事务不保证隔离性, 在极端情况下可能由于脏写无法完成回滚操作, 比如举一个极端的例子, 分布式事务内先给用户A充值, 然后给用户B扣减余额, 如果在给A用户充值成功, 在事务提交以前, A用户把余额消费掉了, 如果事务发生回滚, 这时则没有办法进行补偿了。这就是缺乏隔离性造成的典型的问题, 实践中一般的应对方法是:\n
      \n
    • 业务流程设计时遵循“宁可长款, 不可短款”的原则, 长款意思是客户少了钱机构多了钱, 以机构信誉可以给客户退款, 反之则是短款, 少的钱可能追不回来了。所以在业务流程设计上一定是先扣款。
    • \n
    • 有些业务场景可以允许让业务最终成功, 在回滚不了的情况下可以继续重试完成后面的流程, 所以状态机引擎除了提供“回滚”能力还需要提供“向前”恢复上下文继续执行的能力, 让业务最终执行成功, 达到最终一致性的目的。
    • \n
    \n
  • \n
\n

API reference

\n

StateMachineEngine API

\n
public interface StateMachineEngine {\n\n    /**\n     * start a state machine instance\n     * @param stateMachineName\n     * @param tenantId\n     * @param startParams\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance start(String stateMachineName, String tenantId, Map<String, Object> startParams) throws EngineExecutionException;\n\n    /**\n     * start a state machine instance with businessKey\n     * @param stateMachineName\n     * @param tenantId\n     * @param businessKey\n     * @param startParams\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance startWithBusinessKey(String stateMachineName, String tenantId, String businessKey, Map<String, Object> startParams) throws EngineExecutionException;\n\n    /**\n     * start a state machine instance asynchronously\n     * @param stateMachineName\n     * @param tenantId\n     * @param startParams\n     * @param callback\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance startAsync(String stateMachineName, String tenantId, Map<String, Object> startParams, AsyncCallback callback) throws EngineExecutionException;\n\n    /**\n     * start a state machine instance asynchronously with businessKey\n     * @param stateMachineName\n     * @param tenantId\n     * @param businessKey\n     * @param startParams\n     * @param callback\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance startWithBusinessKeyAsync(String stateMachineName, String tenantId, String businessKey, Map<String, Object> startParams, AsyncCallback callback) throws EngineExecutionException;\n\n    /**\n     * forward restart a failed state machine instance\n     * @param stateMachineInstId\n     * @param replaceParams\n     * @return\n     * @throws ForwardInvalidException\n     */\n    StateMachineInstance forward(String stateMachineInstId, Map<String, Object> replaceParams) throws ForwardInvalidException;\n\n    /**\n     * forward restart a failed state machine instance asynchronously\n     * @param stateMachineInstId\n     * @param replaceParams\n     * @param callback\n     * @return\n     * @throws ForwardInvalidException\n     */\n    StateMachineInstance forwardAsync(String stateMachineInstId, Map<String, Object> replaceParams, AsyncCallback callback) throws ForwardInvalidException;\n\n    /**\n     * compensate a state machine instance\n     * @param stateMachineInstId\n     * @param replaceParams\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance compensate(String stateMachineInstId, Map<String, Object> replaceParams) throws EngineExecutionException;\n\n    /**\n     * compensate a state machine instance asynchronously\n     * @param stateMachineInstId\n     * @param replaceParams\n     * @param callback\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance compensateAsync(String stateMachineInstId, Map<String, Object> replaceParams, AsyncCallback callback) throws EngineExecutionException;\n\n    /**\n     * skip current failed state instance and forward restart state machine instance\n     * @param stateMachineInstId\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance skipAndForward(String stateMachineInstId) throws EngineExecutionException;\n\n    /**\n     * skip current failed state instance and forward restart state machine instance asynchronously\n     * @param stateMachineInstId\n     * 
@param callback\n     * @return\n     * @throws EngineExecutionException\n     */\n    StateMachineInstance skipAndForwardAsync(String stateMachineInstId, AsyncCallback callback) throws EngineExecutionException;\n\n    /**\n     * get state machine configurations\n     * @return\n     */\n    StateMachineConfig getStateMachineConfig();\n}\n
\n

StateMachine Execution Instance API:

\n
StateLogRepository stateLogRepository = stateMachineEngine.getStateMachineConfig().getStateLogRepository();\nStateMachineInstance stateMachineInstance = stateLogRepository.getStateMachineInstanceByBusinessKey(businessKey, tenantId);\n\n/**\n * State Log Repository\n *\n * @author lorne.cl\n */\npublic interface StateLogRepository {\n\n    /**\n     * Get state machine instance\n     *\n     * @param stateMachineInstanceId\n     * @return\n     */\n    StateMachineInstance getStateMachineInstance(String stateMachineInstanceId);\n\n    /**\n     * Get state machine instance by businessKey\n     *\n     * @param businessKey\n     * @param tenantId\n     * @return\n     */\n    StateMachineInstance getStateMachineInstanceByBusinessKey(String businessKey, String tenantId);\n\n    /**\n     * Query the list of state machine instances by parent id\n     *\n     * @param parentId\n     * @return\n     */\n    List<StateMachineInstance> queryStateMachineInstanceByParentId(String parentId);\n\n    /**\n     * Get state instance\n     *\n     * @param stateInstanceId\n     * @param machineInstId\n     * @return\n     */\n    StateInstance getStateInstance(String stateInstanceId, String machineInstId);\n\n    /**\n     * Get a list of state instances by state machine instance id\n     *\n     * @param stateMachineInstanceId\n     * @return\n     */\n    List<StateInstance> queryStateInstanceListByMachineInstanceId(String stateMachineInstanceId);\n}\n
\n

StateMachine Definition API:

\n
StateMachineRepository stateMachineRepository = stateMachineEngine.getStateMachineConfig().getStateMachineRepository();\nStateMachine stateMachine = stateMachineRepository.getStateMachine(stateMachineName, tenantId);\n\n/**\n * StateMachineRepository\n *\n * @author lorne.cl\n */\npublic interface StateMachineRepository {\n\n    /**\n     * Gets get state machine by id.\n     *\n     * @param stateMachineId the state machine id\n     * @return the get state machine by id\n     */\n    StateMachine getStateMachineById(String stateMachineId);\n\n    /**\n     * Gets get state machine.\n     *\n     * @param stateMachineName the state machine name\n     * @param tenantId         the tenant id\n     * @return the get state machine\n     */\n    StateMachine getStateMachine(String stateMachineName, String tenantId);\n\n    /**\n     * Gets get state machine.\n     *\n     * @param stateMachineName the state machine name\n     * @param tenantId         the tenant id\n     * @param version          the version\n     * @return the get state machine\n     */\n    StateMachine getStateMachine(String stateMachineName, String tenantId, String version);\n\n    /**\n     * Register the state machine to the repository (if the same version already exists, return the existing version)\n     *\n     * @param stateMachine\n     */\n    StateMachine registryStateMachine(StateMachine stateMachine);\n\n    /**\n     * registry by resources\n     *\n     * @param resources\n     * @param tenantId\n     */\n    void registryByResources(Resource[] resources, String tenantId) throws IOException;\n}\n
\n

Config reference

\n

在Spring Bean配置文件中配置一个StateMachineEngine

\n
<bean id=\"dataSource\" class=\"...\">\n...\n<bean>\n<bean id=\"stateMachineEngine\" class=\"io.seata.saga.engine.impl.ProcessCtrlStateMachineEngine\">\n        <property name=\"stateMachineConfig\" ref=\"dbStateMachineConfig\"></property>\n</bean>\n<bean id=\"dbStateMachineConfig\" class=\"io.seata.saga.engine.config.DbStateMachineConfig\">\n    <property name=\"dataSource\" ref=\"dataSource\"></property>\n    <property name=\"resources\" value=\"statelang/*.json\"></property>\n    <property name=\"enableAsync\" value=\"true\"></property>\n    <property name=\"threadPoolExecutor\" ref=\"threadExecutor\"></property><!-- 事件驱动执行时使用的线程池, 如果所有状态机都同步执行可以不需要 -->\n    <property name=\"applicationId\" value=\"saga_sample\"></property>\n    <property name=\"txServiceGroup\" value=\"my_test_tx_group\"></property>\n</bean>\n<bean id=\"threadExecutor\"\n        class=\"org.springframework.scheduling.concurrent.ThreadPoolExecutorFactoryBean\">\n    <property name=\"threadNamePrefix\" value=\"SAGA_ASYNC_EXE_\" />\n    <property name=\"corePoolSize\" value=\"1\" />\n    <property name=\"maxPoolSize\" value=\"20\" />\n</bean>\n\n<!-- Seata Server进行事务恢复时需要通过这个Holder拿到stateMachineEngine实例 -->\n<bean class=\"io.seata.saga.rm.StateMachineEngineHolder\">\n    <property name=\"stateMachineEngine\" ref=\"stateMachineEngine\"/>\n</bean>\n
\n

State language reference

\n

"状态机"的属性列表

\n
{\n    \"Name\": \"reduceInventoryAndBalance\",\n    \"Comment\": \"reduce inventory then reduce balance in a transaction\",\n    \"StartState\": \"ReduceInventory\",\n    \"Version\": \"0.0.1\",\n    \"States\": {\n    }\n}\n
\n
    \n
  • Name: 表示状态机的名称,必须唯一
  • \n
  • Comment: 状态机的描述
  • \n
  • Version: 状态机定义版本
  • \n
  • StartState: 启动时运行的第一个"状态"
  • \n
  • States: 状态列表,是一个map结构,key是"状态"的名称,在状态机内必须唯一, value是一个map结构表示"状态"的属性列表
  • \n
\n

各种"状态"的属性列表

\n

ServiceTask:

\n
\"States\": {\n    ...\n    \"ReduceBalance\": {\n        \"Type\": \"ServiceTask\",\n        \"ServiceName\": \"balanceAction\",\n        \"ServiceMethod\": \"reduce\",\n        \"CompensateState\": \"CompensateReduceBalance\",\n        \"IsForUpdate\": true,\n        \"IsPersist\": true,\n        \"IsAsync\": false,\n        \"Input\": [\n            \"$.[businessKey]\",\n            \"$.[amount]\",\n            {\n                \"throwException\" : \"$.[mockReduceBalanceFail]\"\n            }\n        ],\n        \"Output\": {\n            \"compensateReduceBalanceResult\": \"$.#root\"\n        },\n        \"Status\": {\n            \"#root == true\": \"SU\",\n            \"#root == false\": \"FA\",\n            \"$Exception{java.lang.Throwable}\": \"UN\"\n        },\n        \"Retry\": [\n            {\n                \"Exceptions\": [\"io.seata.saga.engine.mock.DemoException\"],\n                \"IntervalSeconds\": 1.5,\n                \"MaxAttempts\": 3,\n                \"BackoffRate\": 1.5\n            },\n            {\n                \"IntervalSeconds\": 1,\n                \"MaxAttempts\": 3,\n                \"BackoffRate\": 1.5\n            }\n        ],\n        \"Catch\": [\n            {\n                \"Exceptions\": [\n                    \"java.lang.Throwable\"\n                ],\n                \"Next\": \"CompensationTrigger\"\n            }\n        ],\n        \"Next\": \"Succeed\"\n    }\n    ...\n}\n
\n
    \n
  • ServiceName: 服务名称,通常是服务的beanId
  • \n
  • ServiceMethod: 服务方法名称
  • \n
  • CompensateState: 该"状态"的补偿"状态"
  • \n
  • IsForUpdate: 标识该服务会更新数据, 默认是false, 如果配置了CompensateState则默认是true, 有补偿服务的服务肯定是数据更新类服务
  • \n
  • IsPersist: 执行日志是否进行存储, 默认是true, 有一些查询类的服务可以配置为false, 执行日志不进行存储提高性能, 因为当异常恢复时可以重复执行
  • \n
  • IsAsync: 异步调用服务, 注意: 因为异步调用服务会忽略服务的返回结果, 所以用户定义的服务执行状态映射(下面的Status属性)将被忽略, 默认为服务调用成功, 如果提交异步调用就失败(比如线程池已满)则为服务执行状态为失败
  • \n
  • Input: 调用服务的输入参数列表, 是一个数组, 对应于服务方法的参数列表, $.表示使用表达式从状态机上下文中取参数,表达使用的SpringEL, 如果是常量直接写值即可。复杂的参数如何传入见:复杂参数的Input定义
  • \n
  • Output: 将服务返回的参数赋值到状态机上下文中, 是一个map结构,key为放入到状态机上文时的key(状态机上下文也是一个map),value中$.是表示SpringEL表达式,表示从服务的返回参数中取值,#root表示服务的整个返回参数
  • \n
  • Status: 服务执行状态映射,框架定义了三个状态,SU 成功、FA 失败、UN 未知, 我们需要把服务执行的状态映射成这三个状态,帮助框架判断整个事务的一致性,是一个map结构,key是条件表达式,一般是取服务的返回值或抛出的异常进行判断,默认是SpringEL表达式判断服务返回参数,带$Exception{开头表示判断异常类型。value是当这个条件表达式成立时则将服务执行状态映射成这个值
  • \n
  • Catch: 捕获到异常后的路由
  • \n
  • Retry: 捕获异常后的重试策略, 是个数组可以配置多个规则, Exceptions 为匹配的的异常列表, IntervalSeconds 为重试间隔, MaxAttempts 为最大重试次数, BackoffRate 下一次重试间隔相对于上一次重试间隔的倍数,比如说上次一重试间隔是2秒, BackoffRate=1.5 则下一次重试间隔是3秒。Exceptions 属性可以不配置, 不配置时表示框架自动匹配网络超时异常。当在重试过程中发生了别的异常,框架会重新匹配规则,并按新规则进行重试,同一种规则的总重试次数不会超过该规则的MaxAttempts
  • \n
  • Next: 服务执行完成后下一个执行的"状态"
  • \n
\n
\n

当没有配置Status对服务执行状态进行映射, 系统会自动判断状态:

\n
    \n
  • 没有异常则认为执行成功,
  • \n
  • 如果有异常, 则判断异常是不是网络连接超时, 如果是则认为是FA
  • \n
  • 如果是其它异常, 服务IsForUpdate=true则状态为UN, 否则为FA
  • \n
\n
\n
\n

整个状态机的执行状态如何判断?是由框架自己判断的, 状态机有两个状态: status(正向执行状态), compensateStatus(补偿状态):

\n
    \n
  • 如果所有服务执行成功(事务提交成功)则status=SU, compensateStatus=null
  • \n
  • 如果有服务执行失败且存在更新类服务执行成功且没有进行补偿(事务提交失败) 则status=UN, compensateStatus=null
  • \n
  • 如果有服务执行失败且不存在更新类服务执行成功且没有进行补偿(事务提交失败) 则status=FA, compensateStatus=null
  • \n
  • 如果补偿成功(事务回滚成功)则status=FA/UN, compensateStatus=SU
  • \n
  • 发生补偿且有未补偿成功的服务(回滚失败)则status=FA/UN, compensateStatus=UN
  • \n
  • 存在事务提交或回滚失败的情况Seata Sever都会不断发起重试
  • \n
\n
\n

Choice:

\n
\"ChoiceState\":{\n    \"Type\": \"Choice\",\n    \"Choices\":[\n        {\n            \"Expression\":\"[reduceInventoryResult] == true\",\n            \"Next\":\"ReduceBalance\"\n        }\n    ],\n    \"Default\":\"Fail\"\n}\n
\n

Choice类型的"状态"是单项选择路由\nChoices: 可选的分支列表, 只会选择第一个条件成立的分支\nExpression: SpringEL表达式\nNext: 当Expression表达式成立时执行的下一个"状态"

\n

Succeed:

\n
\"Succeed\": {\n    \"Type\":\"Succeed\"\n}\n
\n

运行到"Succeed状态"表示状态机正常结束, 正常结束不代表成功结束, 是否成功要看每个"状态"是否都成功

\n

Fail:

\n
\"Fail\": {\n    \"Type\":\"Fail\",\n    \"ErrorCode\": \"PURCHASE_FAILED\",\n    \"Message\": \"purchase failed\"\n}\n
\n

运行到"Fail状态"状态机异常结束, 异常结束时可以配置ErrorCode和Message, 表示错误码和错误信息, 可以用于给调用方返回错误码和消息

\n

CompensationTrigger:

\n
\"CompensationTrigger\": {\n    \"Type\": \"CompensationTrigger\",\n    \"Next\": \"Fail\"\n}\n
\n

CompensationTrigger类型的state是用于触发补偿事件, 回滚分布式事务\nNext: 补偿成功后路由到的state

\n

SubStateMachine:

"CallSubStateMachine": {
    "Type": "SubStateMachine",
    "StateMachineName": "simpleCompensationStateMachine",
    "CompensateState": "CompensateSubMachine",
    "Input": [
        {
            "a": "$.1",
            "barThrowException": "$.[barThrowException]",
            "fooThrowException": "$.[fooThrowException]",
            "compensateFooThrowException": "$.[compensateFooThrowException]"
        }
    ],
    "Output": {
        "fooResult": "$.#root"
    },
    "Next": "Succeed"
}

A "state" of type SubStateMachine invokes a sub state machine.
StateMachineName: the name of the sub state machine to invoke.
CompensateState: the compensation state for the sub state machine. It can be omitted, in which case the system creates one automatically. Compensating a sub state machine simply calls the sub state machine's compensate method, so users do not need to implement a compensation service for it themselves. When this property is configured, the Input property can be used to pass in custom variables; see CompensateSubMachine below.

CompensateSubMachine:

"CompensateSubMachine": {
    "Type": "CompensateSubMachine",
    "Input": [
        {
            "compensateFooThrowException": "$.[compensateFooThrowException]"
        }
    ]
}

A state of type CompensateSubMachine is used specifically to compensate a sub state machine. It calls the sub state machine's compensate method; the Input property can pass in custom variables, and the Status property can be used to determine whether the compensation succeeded.

Input definition for complex parameters

"FirstState": {
    "Type": "ServiceTask",
    "ServiceName": "demoService",
    "ServiceMethod": "complexParameterMethod",
    "Next": "ChoiceState",
    "ParameterTypes" : ["java.lang.String", "int", "io.seata.saga.engine.mock.DemoService$People", "[Lio.seata.saga.engine.mock.DemoService$People;", "java.util.List", "java.util.Map"],
    "Input": [
        "$.[people].name",
        "$.[people].age",
        {
            "name": "$.[people].name",
            "age": "$.[people].age",
            "childrenArray": [
                {
                    "name": "$.[people].name",
                    "age": "$.[people].age"
                },
                {
                    "name": "$.[people].name",
                    "age": "$.[people].age"
                }
            ],
            "childrenList": [
                {
                    "name": "$.[people].name",
                    "age": "$.[people].age"
                },
                {
                    "name": "$.[people].name",
                    "age": "$.[people].age"
                }
            ],
            "childrenMap": {
                "lilei": {
                    "name": "$.[people].name",
                    "age": "$.[people].age"
                }
            }
        },
        [
            {
                "name": "$.[people].name",
                "age": "$.[people].age"
            },
            {
                "name": "$.[people].name",
                "age": "$.[people].age"
            }
        ],
        [
            {
                "@type": "io.seata.saga.engine.mock.DemoService$People",
                "name": "$.[people].name",
                "age": "$.[people].age"
            }
        ],
        {
            "lilei": {
                "@type": "io.seata.saga.engine.mock.DemoService$People",
                "name": "$.[people].name",
                "age": "$.[people].age"
            }
        }
    ],
    "Output": {
        "complexParameterMethodResult": "$.#root"
    }
}

The complexParameterMethod used above is defined as follows:

People complexParameterMethod(String name, int age, People people, People[] peopleArray, List<People> peopleList, Map<String, People> peopleMap)

class People {

    private String name;
    private int    age;

    private People[] childrenArray;
    private List<People> childrenList;
    private Map<String, People> childrenMap;

    ...
}

Parameters passed in when starting the state machine:

Map<String, Object> paramMap = new HashMap<>(1);
People people = new People();
people.setName("lilei");
people.setAge(18);
paramMap.put("people", people);
String stateMachineName = "simpleStateMachineWithComplexParams";
StateMachineInstance inst = stateMachineEngine.start(stateMachineName, null, paramMap);

Note that the ParameterTypes property can usually be omitted. It is needed when the invoked method's parameter list contains generic collection types such as Map or List, because Java erases generics at compile time; in that case, also add "@type" to the corresponding JSON object in Input to declare the element type of the collection.


FAQ


Q: Can the saga service flow be left unconfigured and simply chained together by the global transaction id? That would save the configuration effort, and manual configuration is inevitably error-prone.

A: There are generally two kinds of saga implementations: one based on a state machine definition, such as Apache Camel Saga and Eventuate, and one based on annotations plus interceptors, such as ServiceComb Saga; the latter does not require a state diagram. Because Saga transactions do not guarantee isolation, in extreme cases a rollback may become impossible due to dirty writes. As an extreme example, suppose a distributed transaction first credits user A and then debits user B; if, after A is credited but before the transaction commits, A spends the balance, there is no way to compensate should the transaction roll back. Some business scenarios can allow the business to succeed eventually: when a rollback is impossible, the remaining steps can be retried until they complete. Besides "rollback", a state machine engine can also provide "forward" recovery, resuming from the saved context and driving the business to eventual success, so in practice state-machine-based implementations are used more often. An annotation-plus-interceptor implementation will also be provided later.
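
As a rough illustration of the "rollback" and "forward" recovery mentioned above, the sketch below assumes the StateMachineEngine interface exposes forward(...) and compensate(...) operations that take a state machine instance id and optional replacement parameters; treat the exact method signatures as an assumption to be checked against your Seata version.

import java.util.Collections;
import java.util.Map;

import io.seata.saga.engine.StateMachineEngine;
import io.seata.saga.statelang.domain.StateMachineInstance;

public class RecoveryExample {

    // Assumed API: forward(...) re-drives an unfinished instance towards success,
    // while compensate(...) rolls back the states that have already executed.
    public void recover(StateMachineEngine engine, String stateMachineInstId) {
        Map<String, Object> replaceParams = Collections.emptyMap();

        // "Forward" recovery: retry the remaining states so the business
        // eventually succeeds (useful when a rollback is no longer possible).
        StateMachineInstance forwarded = engine.forward(stateMachineInstId, replaceParams);

        // Or "rollback" recovery: compensate the states that already ran.
        // StateMachineInstance compensated = engine.compensate(stateMachineInstId, replaceParams);
    }
}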

Q: Suppose service A lives in system 1 and service B in system 2. The global transaction is started by A, and the flow calls B to open a branch transaction. Does system 2 also have to maintain the three Saga state machine tables and configure a StateMachineEngine in its Spring bean configuration?

A: No. Logs are recorded only on the initiator side. Because logs are kept only on the initiator and no particular interface signature is required of participant services, Saga makes it easy to integrate services of other organizations or legacy systems.

Q: What if the services in system 1 and system 2 can call each other, and both systems can start a global transaction? Can it be used that way? Then both 1 and 2 need to maintain the three Saga state machine tables and configure a StateMachineEngine in their Spring bean configuration?

A: Yes, it can be used that way. If both systems start saga transactions, then both need the three tables and a StateMachineEngine configuration.

Q: We are currently using Seata in AT mode. How big would the change be if we switched to Saga mode?

A: AT mode is completely transparent, while Saga is intrusive: you have to configure the state machine JSON. If there are many services, the change is fairly large.

Q: Is Saga mode built on top of AT mode to strengthen long-transaction handling?

A: No, it is not based on AT. The clients are two completely separate implementations; only the server side is shared. You can also look at the Saga unit tests, which contain many examples: https://github.com/seata/seata/tree/develop/test/src/test/java/io/seata/saga/engine

Q: In the state machine engine diagram of the developer documentation, is the EventQueue only used for event-driven execution inside the system that starts the distributed transaction, with calls to other systems' services looking like local calls? That is, it is still RPC between systems, rather than everything being purely event-driven? ("purely event-driven between systems" here meaning the RPC is also non-blocking)

A: Between nodes the engine is event-driven. Non-blocking RPC requires support from the RPC client, which is also possible in theory; if the RPC client uses non-blocking IO as well, then every step is asynchronous.

Q: Consider a business flow whose subsequent sub-flows do not affect each other no matter which runs first, so they could be called asynchronously; the sub-flows are services in other systems. Does Seata Saga support this? I have not quite figured out whether Seata Saga's asynchronous invocation means the individual nodes run asynchronously.

A: Starting a state machine asynchronously (stateMachineEngine.startAsync) means that all states inside the state machine are executed in an event-driven way, but the flow itself is still sequential: the event for the next state is produced only after the previous state finishes. Invoking a single service asynchronously means configuring that ServiceTask with "IsAsync": true; such a service is called asynchronously, does not block the progress of the state machine, and the state machine does not care about its result.
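
A minimal sketch of the asynchronous start mentioned above. It assumes the AsyncCallback interface from io.seata.saga.engine with onFinished/onError callbacks and a ProcessContext from io.seata.saga.proctrl, as used in the Seata saga samples; the state machine name and parameters are placeholders, and the exact callback signatures are an assumption.

import java.util.HashMap;
import java.util.Map;

import io.seata.saga.engine.AsyncCallback;
import io.seata.saga.engine.StateMachineEngine;
import io.seata.saga.proctrl.ProcessContext;
import io.seata.saga.statelang.domain.StateMachineInstance;

public class AsyncStartExample {

    public void startAsync(StateMachineEngine stateMachineEngine) {
        Map<String, Object> params = new HashMap<>();
        params.put("businessKey", "order-0001"); // placeholder

        // startAsync returns immediately; the states are driven by events and the
        // callback fires once the whole state machine has finished.
        stateMachineEngine.startAsync("reduceInventoryAndBalance", null, params,
                new AsyncCallback() {
                    @Override
                    public void onFinished(ProcessContext context, StateMachineInstance inst) {
                        // Inspect inst.getStatus() / inst.getCompensationStatus() here.
                    }

                    @Override
                    public void onError(ProcessContext context, StateMachineInstance inst, Exception e) {
                        // The engine failed to drive the state machine; handle or log the error.
                    }
                });
    }
}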

Q: What are the synchronous bus and the asynchronous bus in the event-driven layer of the Saga source code for?

A: The synchronous bus blocks the calling thread and returns only after the whole state machine has finished. The asynchronous bus is non-blocking: it returns immediately after the call, and your callback is invoked once the state machine finishes.

Q: About IsPersist: execution logs are persisted by default (true), and query-only services can set it to false so their logs are not stored for better performance, because they can be re-executed during recovery from an exception?

A: Yes, it can be set to false. However, it is recommended to keep the default at first, so the execution logs are complete when you query them; tune it only when you really need the performance, and usually there is no performance problem.

Q: If the client that started the saga transaction or the Seata Server crashes or restarts, how are unfinished state machine instances guaranteed to keep executing? Who triggers that?

A: State machine instances are logged in the local database, and execution is recovered from those logs. Seata Server triggers the transaction recovery.

Q: Do the saga JSON files support hot deployment?

A: Yes: stateMachineEngine.getStateMachineConfig().getStateMachineRepository().registryByResources(). However, hot deployment of the Java code and services themselves has to be supported by you.
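
A rough sketch of the re-registration call mentioned above. It assumes the repository's registryByResources method accepts an array of Spring Resource objects plus a tenant id; the resource pattern and the tenant id value are placeholders, so verify the exact parameters against your Seata version.

import java.io.IOException;

import io.seata.saga.engine.StateMachineEngine;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;

public class HotDeployExample {

    // Re-registers the state machine JSON definitions at runtime so an updated
    // definition takes effect without restarting the application.
    public void reload(StateMachineEngine stateMachineEngine) throws IOException {
        Resource[] resources = new PathMatchingResourcePatternResolver()
                .getResources("classpath*:statelang/*.json"); // placeholder pattern

        stateMachineEngine.getStateMachineConfig()
                .getStateMachineRepository()
                .registryByResources(resources, "000001"); // assumed tenant id
    }
}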

Q: Both input and output parameters are kept in the saga context. If the parameters are large and the business volume is high, is there any limit on memory?

A: There is no limit, but it is recommended not to put irrelevant parameters into the context. Put in only the parameters the next service needs or the ones used for branch decisions.

Q: To confirm: each node either catches exceptions inside its own method so that it always returns a result, or leaves them unhandled and lets the state machine engine catch them via a Catch property defined in the JSON. Compensation is never triggered automatically; if compensation is needed, it must be routed explicitly in the JSON to CompensationTrigger via Catch or Choices?

A: Correct. This is for flexibility: users can decide for themselves whether to roll back, because not every exception should trigger a rollback, and some may have their own custom handling.

Q: So Catch and Choices can route to any state you want, right?

A: Yes. This design of custom-triggered compensation was inspired by BPMN 2.0.

Q: About the JSON files: I plan to define one JSON per flow. Some flows are very similar and could be merged with Choices, but I feel the JSON should stay as simple as possible. Is that the right way to think about it?

A: You can consider reusing flows with sub state machines. A sub state machine adds one extra stateMachineInstance record, but the impact on performance should be small.
\n", - "link": "/zh-cn/docs/user/saga.html", - "meta": { - "title": "Seata Saga 模式", - "keywords": "Seata", - "description": "Saga模式是SEATA提供的长事务解决方案,在Saga模式中,业务流程中每个参与者都提交本地事务,当出现某一个参与者失败则补偿前面已经成功的参与者,一阶段正向服务和二阶段补偿服务都由业务开发实现。" - } -} \ No newline at end of file diff --git a/zh-cn/docs/user/spring.html b/zh-cn/docs/user/spring.html deleted file mode 100644 index 126fbbdf..00000000 --- a/zh-cn/docs/user/spring.html +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - Seata Spring支持 - - - - -
Documentation

Spring Support

-

TBD

-
- - - - - - - diff --git a/zh-cn/docs/user/spring.json b/zh-cn/docs/user/spring.json deleted file mode 100644 index 1205e265..00000000 --- a/zh-cn/docs/user/spring.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "spring.md", - "__html": "

Spring Support


TBD

\n", - "link": "/zh-cn/docs/user/spring.html", - "meta": { - "title": "Seata Spring支持", - "keywords": "Seata", - "description": "Seata Spring支持。" - } -} \ No newline at end of file diff --git a/zh-cn/index.html b/zh-cn/index.html deleted file mode 100644 index 6c793dec..00000000 --- a/zh-cn/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - Seata - - - - -

Seata

Seata is an open source distributed transaction solution dedicated to providing high-performance and easy-to-use distributed transaction services under a microservices architecture.

What is Seata?

Seata is an open source distributed transaction solution dedicated to providing high-performance and easy-to-use distributed transaction services under a microservices architecture. Before Seata was open-sourced, its internal counterpart had long served as the distributed consistency middleware inside the Alibaba economy, helping it get through every Double 11 smoothly and strongly supporting the business units. After years of accumulation, the commercial product was offered on Alibaba Cloud and Financial Cloud. In January 2019, in order to build a more complete technical ecosystem and share the technology more widely, Seata was officially open-sourced; going forward, Seata will be built together with the community to make the technology more reliable and complete.

Features

  • Microservice framework support

    RPC frameworks such as Dubbo, Spring Cloud, Sofa-RPC, Motan and gRPC are already supported; integration with other frameworks is in progress

  • AT mode

    Provides a non-intrusive transaction mode with automatic compensation; AT mode currently supports MySQL and Oracle, with PostgreSQL and H2 under development

  • TCC mode

    Supports TCC mode, which can be mixed with AT mode for greater flexibility

  • SAGA mode

    Provides an effective solution for long-running transactions

  • XA mode (under development)

    Supports XA mode for databases that implement the XA interface

  • High availability

    Supports a cluster mode based on database storage, with strong horizontal scalability

- - - - - - -