diff --git a/.gitignore b/.gitignore
index d1039b5c4..5f059c267 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,5 +25,6 @@ rebar3.crashdump
.rebar3/
core_vnode_eqc.log
.idea
+.vscode/
*.iml
**/*.coverdata
diff --git a/docs/Diagrams.drawio b/docs/Diagrams.drawio
new file mode 100644
index 000000000..12ab1cb71
--- /dev/null
+++ b/docs/Diagrams.drawio
@@ -0,0 +1 @@
+7V1Zc+I4EP41PJLCF8fjkGST3cxuzT27+0IpIEAbYzmyICS/fmV8BJu2sbElVDW8BJAv+euvD7XUSse6Xm3vGPKXf9IZdjtmb7btWDcd0zSsfk98hC2vUcuwb0YNC0Zm8UnvDV/JG44b4+sWazLDQeZETqnLiZ9tnFLPw1OeaUOM0ZfsaXPqZp/qowU+aPg6Re5h608y48v4LczBe/s9Jotl8mSjP4qOrFBycvwmwRLN6Mtek3Xbsa4ZpTz6ttpeYzcEL8Eluu63gqNpxxj2eJULtv+svn/5+MBcf917/vcz//x9ybrxXTbIXccv3DH7rrjf2A+7zF9jHPrP67Cf4xViC+J1rA/iaM/fir+icfe2YXuXUz861t87xvGWd5FLFvF1U9FhzN7vKb4t4s/dkx+Thi8EPYnzrynD4uMj4Tg5RbzlY/4y0ebn25Ys33LSa7l4zqODw/BgYccPnpbtk5l5thlDchPdfUw3mM3dHUPmREhO9J6vxMeNIb7OGPW/ic7gUNS99G770k9EiZkAfK8pZsMdpivM2as45eWdyIYxjOm53GPxINFaFGvPIr34nWDiS8yxGnwzAb7lYBF64odfp3TlU2/3euP/MOevif4Nk4ZU8WwQkFLCH0dpm4F1DzMTQCxpY9hFnGyyxgNCMX7eJ0pEd9OHdU07uiS2lnb2BnQ+DwQD8kJIO11JLob5dv/p7v7H+vcH0/l7NES33YduYs/KBLNgdO1X511qk9FjcodeOdK9zLubVu8Aegdiq2m3wFYQFQNCRRvzeI+8mWDExSaW2MRSrteylKqpN1RoKcvJfzGVWVxGh5K5NjsfxsHax2xDAsrC3+MSFUj1lIkIZzIV8c1kGSnzxCUBxx5mkyC0tZBm50iwpxDt8z8+ag2yptk+tMzGCBC1I0s7Eg4dyGCBvYn3OAmEIHAbYtBGBMZxCQyUSgAYOaQSaAz/CnliYKUt+oZxdviBQLo9I8TwFAvLrJcRElHIUSGk56gRgiVTBxIhaCOAvBIMz64EtkwlCLA3010FIBGodcTOoQiqQqsNrHYVZveVwtqXymyNKQ0FNo5K6G0gtFSdCXHsHCijQ1TspF/7qKT2t3VYLGg8qk0q5AvxFpc8SM08SMr0WnkQ1bwDRttq8yAp8y95kGweBDKUhb6HCQ2drDlxJXmeGmRO5yZzXr83ujKHzsCwk7+HngiSo9O7ckaOaQ17yV9JqgAko9PRBt6E8NQPCXZi2V0c6COYrFys4ZUNwLsvFvucYgEcYztiCcM1V1qYXF8u5qC+wpxVMkWJ2urD8xnZVJVWacqqJAbIPOIc2jaorW6Oc0ahGkDSpVw02ihQfphZQX/gfFb7UI8fjKfnl8GQumS4mnjs7Q/nrVsH6RZAbgonkA+UlY4C4SqeLAaIGXDE12343PqoVchlGypxK0yjIt93yVQEx9Q7wYeKq89PSiCdISs/B2NbFJ00SiS1k0CqDa2pF7aWBmtmRv2jmJiQMrcyYIdRKc4Ia5Aoug0D8tLA+pIwOk74Wgkj1fwDUud5sFpLGJVrwK+bMAJxMaRM1+E9jW7HMTXlvgZrBmD8AcvcfLo6g/9ijdhMVwmoXLUEvoEOU0umdTxg6APje4kGW+uZpWsXkdUlUqgZKZw2taSaeCqnlsqpf4kUsp6qaPlDI081DVUZhS8oxT+dMgPlHHVQ8IyTrAihVgprh2dLc3ptIZoFFIy5LKWIFju3IkR1BRNip6zllvAbAOMH1eFTP5eDgoqUbIhh6Yntw3KYPv0udJJwgoNCfxbWwQojao3n1ONf4+MhmwIfTYm3+BYGUTfdMIS2xgKdxN058e/U25lxwycakF2iNg0s9iiaCzmqMtYukFr5+hS16JvH9Xszden0SZZaVwcpOZpbXAaV2alU61orSDkOuFSXUxtOKz+fMji3F6/lcsKluJMVDoKwDl4XTA8mShWuxwVfARopFM85I09WDuQEKIcV6NkDJ/QlYVlv3ZjPCGW
Ev06e13itL0MhVJVa0SQWqobqIwpw314zfcyoMTKvcsE7pPVKg3eouOHUYWi2MFEX1M3eIIc6EA2ojfKh0gcIzHmwmoRZGSRtPN/UKEBQKl1YAY2XCk2CXkGVdRRKtZl7Hab6c54cKgoB7WOqv+3DovVc/x0NAlIwH3jJ3R/ner2NhBQz7+yz/PZllh8O76UURS9KdLm5f5Ixs6zW01fYLUZ60aJ5NF8MEl7eBJ+t9e52f9FZuK3dT8Sny8titPpuqvrc6RkZCI3K1bqpy4Z3sJuSsn+NJ3R68lKm0c2dVf01AxUWoinNVRmAVjQumduHvrWKRjkSgDyjrBwLuAkmVNijOFoYVNhSDErxS1wOBNlqfaIFtCGLqBTmEikURgqlbK83nlVMvgoqKXktmlkZpl8rUCi0Cqn2TZcoWDZ3Ng3JW1DfZx0W8KqdmKqVhUZ+C0notoDMhk29QyDVlvJVWJy3Y+IjKfARZ8cQ4KKsCgiw/xosIMuHgkAS34FSR/J2GoZKRPWJe35EaZLTQ54zBBwnrVtTLfWz76tkXha/w/tNAetjmmcmNrvxcWv76jbkfTLNe7yiW+2+i5aUrFCCfcD1gb7SVqJKN16Ext/Ny0Mj7H1Gt6/tVIe2A792e+mCA9AqqOqCqJXbCQFMc8LbUcmCVEq9cwS9RlSulF+WtW4PBr503R5HHK9OBv6FsifMJj6lsgaqjW0JhD/0z0vk4S+lfG8ff22gd44HMGqdaNkmkXtLJk+EXxvcj8PeUsmF+Pn+L/qiEP/9Hx1at/8DzZxbc9o4FMc/DY9hbMnXx5LtdqftznY3O+nMvjCKrRg1tuXKApJ++hXYBl9kwBDL4oE48g1+/6OjI51jZvA+ef3EULb6k4Y4ngEjfJ3B32YAmNAxxJ9dy1vR4jmgaIgYCcuDjg0P5BcuG8vzojUJcd44kFMac5I1GwOapjjgjTbEGN02D3umcfOuGYpwp+EhQHG39TsJ+apsNR3/uOMPTKJVeWsPuMWOBFUHl98kX6GQbmtN8OMM3jNKebGVvN7jeAev4hKif39sjPt/3K+fv9FH+tfjgtC74mK/Dznl8BUYTvnVl/7888PD47e/f3nhf9sv+cZI4Y/FXYVng+J1CWwGnFjcZfEkNqLdBiPoZRlQhqs94h6HnSUa/lbxXvEkFlvmYd/Zj119BMw4fq2JVn6NT5gmmLM3cUi51/NKRUqThIY5h55xeJUX3B7ldssTVjWh7bINlQYWHe5zhCg2So5DmIIBTJdrTmJdwDptsI4xN2ov0AHrKwULh4DNOeLrXFe0puvNLbvG1u6wNU2lcK0u3Hsw+7BAWRaTAHFC013DYnFgzo5Ae1UQZ08uwcFNGBPbr92DOF9nmG1ITtkVhMXJkxPuMXIAzxq5UgGg3yNAhNNlLiTA1wgQxIgkSJDURAVgNFVwwBy6fu0lcTVApQyWMcSP7/HqMUyWQG2r60hMqJTgkLCtIDg5vB7r7BinxDoNlWztUZxEKiZWyy3iwQozXaSwrJYUlhDD6+K3VeJ3JM7hgB9vdghuo19cRJvAsC2C6UlFAEr7gCMJBtt0gxXKV5NTrCFSa6aSWO5UFKzN4AUkg5fSIMxxLrStJ5LqAs3pMlO6pAD7Jmc3zRz2jnCF0jAWXlGjaUQ7ROhxiaZaCfombzeFBQ0JojViobYiALkISn3HKBPoHX76/LyMSc5xqndXgAY4s9SpVhCJLz90ivTp+n7R1kRbPSQjg1q/5I7hlir8CUpRpC99C0xN3xvTITEcYLLR2yFZXlcCtXOVUabrbQl0xS9dilLaA6okxThdIMdpqFcH6CyaXLB+pXRINgetDTZB6wr5kiVsRylkMKrV62vufdMApYsvbp/Pucnnb/arhJqFPO2Umgh5zqTk1XYEV+Jt3k8KEfzrq4R3Vgm1nWIUl1QokTH6+qaTW2qLIeKgc2KonZUNKlWpQdYFcLvGqlVhZfiSlQdLKeFRlkQLJTQy9LYOTks
IiQxKM9HuyWVRjjhOrpZhS9mLCP4zSqfPnvR1i3bHkOjhKdXj1IrcjeNxoYe2UnjnqkAVj8enVuae80SQTTJ0TTZ7r4auMgDrrAxqi40uSGVvgpgGL7oShW2f3yGq1K6tQZlvjnOuR/FWX42i36xRhFPb6wXp8VrxJ07DZYLzfPcUhKaETfN8qbPSOasl8cz9iBlKQ13QtldjpOmniq8alpIMSD/LjBHKCH9b/lzjtTYG26F6Psmq1uNKUhz9jJ9Qjh1rzab3uKeMVOkkpbrwewTFzcy2JohNSRZOcdXsBVmHVuCrCztZElnpozfmoCGfkTTSOqK62y3FWcCzXXP37rsGsODFzxzMbd8GwvtW72MxPzU7u7bMeC+N3uXFRf6gBriri3wZT5UufRUVN+tSVtlpK4xnzm2r7DPiXTddbi+zCMnmUq1OJuH6ri+Oatxiek1ty5xb8KBOlTRuBJLSYF2RqEASF51WRpfe08n/+HPfP3Ye2SxeHg6pIj0shF8HL1ibAElW4qU2QIKD7LSgt0unZdMPw6cqFZVOIu1R6tcjmudEm2RZ59nCngfa3sl4xb/HX//Y76v9hgr8+D8=7V1pd5u41/80PWfmRXzQxvKySbrMdDJNmy7T500OsbHNUwwu4CTup/9L7CAZg42wkrYzpwUBAt97f3eRrq5eoIvV45vQXi+vgpnjvYDa7PEFunwB6R/doP+wlm3aggyDpC2L0J2lbaBsuHF/OlmjlrVu3JkT1W6Mg8CL3XW9cRr4vjONa212GAYP9dvmgVd/69peOFzDzdT2+Nav7ixepq0mNMr2t467WOZvBrqVXlnZ+c3ZL4mW9ix4qDShVy/QRRgEcXq0erxwPEa9nC6bD5//WVy/+fhJf//zejoNV+/OX52lnb3u80jxE0LHjw/u+hqEb4PZe3TjvF9/+PLu4ur2/eoMpl3f294mo9dfvhu7tsf4CLW3tj8L5nN6dBGs1oFP3x9lxIi3OYUpXdbscLPyXof2ih6ePyzd2LlZ21PW/kAli7Yt45VHzwC7nDECWBq7kJMf6vSs42/NaHLvhLHzWOF09tvfOMHKicMtvSW7alkZGzNBPrNyCX0oxaK4aVkRCWhkjXYmioui85Lc9CCjeB/qg3byf/mX4pH+e2X7VEzDF1D36DedR2vbr7FA/7FhQng+DbwgfIFe0ovh4s7+g343/Z9+myY8+pMdMopTVPnx2dxeud42fXwV+EGU8K92S5TAm92grR/L99KjRfovaaoSQsnBWhOYFGc5eUhCINpyyY7ZhxFGEEKpuu9eUNybC8tB3cCym5QLxZUqJ0gFCqQOhvzplBvF0yko2GkBC3pSBQa7lkCDtSfgYC0gvS0RyeScSSQpQMKaKExYS+XDUxB0IU1xqaBKiRTCsFLcqZdU1LaVDnSCywvldyZtSKs8VPnilNLVi3ZUXlhUvqDJteS0YF21sS5Q2X2c5KWAoeBMMZOL6fPQYMRCE2JxSkyHAiWGDHOCj1djt+b9Yu44cwdc3FzBn1fn39DZGQCY02M3sR1S/mg32yh2VvTgOgymThQ5g9oPvUZ9pEmkPupiPjQB5S00gPkQ0l3jqP7iAr54eb5w/NvICelvY+fn5zkM7sISAVlL3hC69vfbaRA6t1PPdlc20xkFdu52AqfCE1l0h8isEx4Dju5AJPFEFtl5oz0A2X2qtG4f7Hi6LO28cqSHwJpgrfwj4AQZkxO891pywrln1DiOEWknkbr8MHkdBEU6SBoH0A4ORJs1RYIbBYdgIaH6kvpbHmUB7UlZ+puQpz8wxqQ/b3gH0EX3CQZW9aBDOeIDYk2syh/D4Hmhj8kLIgMLKS/WYfC4VRoKGFoTZFbYcWJg6PKYoTIbTIFFAGhMwhsSbHLo+gvVbbFuMXVk6DQixOxvgW+EhXygIRyhvWn535L4Yh5tKWbufVdetdqOXf3Tu2qvUIGpQJvAkqmmyTOVkBMy1ZJh/hdBFLnqqjggCjvAqAH
grsD7KOOyTAcYlTYvhi4wL+OGfG0x3zxaUVqu1vYhRiYx7qpQnuRRXKthH3XYA/KxHpiAdDYgYqN9t57rf6dda20EYyRwp7b30nMXPm27C+I4WNELjj97yab7WJsXTL/3Jm4UbMKps98lpJ+6cOIOIubMajOLO3l1Rm0Ex5ucXaHj2bF7X5+MFPEm6/46cNnYWzHqaIKGRcIN/qY/O3usZDHXE9b39ZQSpqWn/EaqpSKndk8iUAVljhhO4NFNKHHTGS3mWGzWMzumKH2dM101ITN5IRNO/YF+MsbcEF2WkEHLnJC6cBBTm6DDBA3pfG+Y622HsFHu2NvKbWt2Q7T70zHA/KeT7GU7oSB4iDQeogfpt4yPAV7P4lzPUpLdrraJj62wojU6YkDriwENmEQeCBr6EesHalpk7Oupt/CfQhdjwRBKe9injPOEOXxTYzfRNAxNYgD6ty4IIcTj5iMFcpgfNiE56OehEy0VhrvA5LUKVHe8I0TkeVaYM1O6dbDRozLVoTfFjJ5OmxQxepgfnrqwPc+ZMd4vHZbu5AeZE8gg4bJcG82N2HGw8WccNPKkAj+g7uLehII0r+kSYAGS4oDdbGdnnjNnao3lRtFP+cSuXZ7JzPrQDV6yoDUxOmowackHWDD21GRCRff4gc/4MLOjpTPLqF5hAGu/tuPYCf2kBWqIUTkOg+9F2iYcXnXhrjFhLp97dVeFH23jIkcqL4GFI6DRS2fVRXgB07o5LEOhnwgG1DpNianicejN4RqEzIlRnQ7jg7cd0wGS4Er4rBV9ggWjNzyK1fAySEekkt5RBdJMfup4IC8D40YsQIg1yTPM+yKVwA6dDeVj5AnyRRCj4YlRyf3RSLtWaT6PAGp5/nSeR+5hdtI96uoby5ogfs591Cl3wg9c6NS/TFRMllISzDYeywBVNpbprGVgXy0DdCRt7OK3lilwYLQ9f0Itwydq6dVBvdB+UH1UTwAN8aIW3NsAA12vs1Fa1I+b8ynYsEQC0x81zemVfR0fMdtycpRhi3RG2WAIEqTXPY3hSM5YY8KMdTkYqQkyF086Gkn4oV89n4azPe82ePCdMFJ4Fq6zGScqmXHEWV54uBnHWofOVDXjGClqxgXZjU0lNF3abLheDd1jNi0r2buEY9R0HsKP+1K/KPeMWNCgiJppdXX26xmjr57RkMaHcwPpmSJjq+qrCISir8rh1sjt6XevC/Qo/IES4c2PaRtFihPL7JsqHbxCXhrFYtvV6kmYZScNfQSai+s7z7KjfT1JHrTWBYPWzzHTnTQHF9gqwGqmuybIij5pqnseilU4Y+YwtmczlmF7SxHq3dnT72pYl7Z1pnuNi95zxPuMWhdMeKbJSlkEw1gXLoFxT78KpDPqkBPEZF18Mq09D0LGNHu6TGi+cKPYCZMraSJyQyafzYS20XRJEUJ758pE4XAzf3U4/SFY1dpkh/JT27nsDTdhNs7UNgYNJxLoA6mPRr87FrOP5D2YvI2Cg+XSJ13k4jl1wyl7OG197Xr54xURDtaOn9+RlfDS0zuyM7O3gO6VO9TPamkTQ8txKMNo1YWDDWAAAxV/4GFCZzRLtiDQzUntOwJjNCMvDbd/WON+w1JkhMXkTSb6tXABerpz2kTXTWlrAyQBAzVHJw2z2/zI6MBAqgBDsCzg1wJG7zE0iguJUY4UXIDm2oGuqXhj40I3VMEFP7NOfi1cdE1NLXBBihD9yeCCmxozdDXtBTFVwQU/X67/WrjoPbc7bnghqJbTfwy8iQoyXJbJYILIJxMYv5Ygwt4KOk+lfDr6uTlJYGA19TMkquhnPpXA/LVg0XsJvA4sebk3knBBmrhAauICWLX7T4gLPiPE+rVw0Tt/VtcN/NRw0ZzMM6CiuFBm/IfPTQHarwUMqy8wDA0/OYPRHAAygJrAMHQ1cGFBHheKwyKfep9SWDjh8DjpXWfBkFhcSBJOYDOpXtWBUlOVgMMS1G9THSkDA6P3zJqBsLSlwbI
MCDImGtQBdeg1aJk4Z3v2GqiTSTWxvmOYLmNZ0tGuWYsFGgw0/OzCgKU+PZbz5atV5B4AU2/WgzCINREkeYo2y5K2UsASrP86vtZtzgnFit2LmMAKTJsV5EKeIaMu97aklFjPGRI6U4fqVuWhYRoiaIy7CYe1q+b6MPWIqUl+AnwACqgofuhK0fq4wmUh+eqd/XF47wADmwavrgbyo7ht0bip4c7JFyjzQMrZ3KMrN+54k6Zj8Ufv/DbuCXUCC35kariUPZXEvvfALEJ6XU1JS7v4VUGgTHpesUJBSn6eSijonVbxRJQ/BE25bzoKCsm9Mul3xW4cUhYyqCT3vdNPUVHWQGm511BT7k115V6ZdQogj7/ljEfk4a8yIRe2IBdymaKNL8YdhKDSy7Hhae1KYnUtQVtI3AmW7YsiboKF41H97a7et2vJS/NA7gB3KzVVHydRGrBQCNhRN8kEGj+OC58WYAXbW7TL0W/AygYsPxTdFCVtmpWTp6/QAt9jZHce10HIPJa64NGw7TUne89m+T3QTU4xGESoGIA1qmIQ1dZqcEH5VfcF5jtoh66jqOOsuwcQc3KhG1QuKrA+cHMMAHlbZJS2aDQtIdixYLjNsln1DmWMvwm4mv5sgsSorDgRbWU6qiMABAMHZem3aOtP2V6OK5tt7vEaK+oM9MF7z/RFVqZH16UtBAEY8KAEHCg7Y1zgbwq6U2A9EgBtkbt/d3zwnqd1KKMORLzRFQjeAR+8o0QBPI1AoDP0c4k7RSCAIZ9JYgwTCBCtb9fDmfi2DQV/73HeVs4PIFFmUaWgX77S4BS7nLftlfr8Cy0KtnVjNC5LLfLzx6NVWhTPnnCc+ct3Y5cGtz/Z/mxvU4NIjy6C1ZoGbPl2lqLIerPyXof2qkN0nf98lt1DL+Q/HMoMmykdBSaUYwfMvdpaoTpZ5N89Thmtbb9GZv3HJmAX5oEfn6XDE8zNBnD9SP9JCKexepiLkG2ndzYNvCDM9l1e3P0B2f5nWjp4AtnOb/kx0f4s+87Bg3InPoVpBV/pZ9XB2e9Tm+/Ke3HzBnsau4Ffeacr0MHHf8fAJKtrMO4LlfR/gGDqtE1LnMD7QaRRWP7w7dPRvp4k+zbHpz8/f98GQLYzZtOGGmACzNKGKubc8EPXgm2BbpNNBe5cX41wSIxywSqcNknukUCEeaYNFR5psLkKhYoQsQYY9CympstcNSZTnZRG/9QKgjr9jN2ZFs0OCCb5146ZbCGUmA67bii3RR8AzRQdAiBVRFzRnxG26xMSlU9Wh8ru1XecwumfuSWvnuaYCkc3n5LCwabR0sHplE/brNE8WrFJirV9yBDEfVrnXRF1xS2oxgQL5bLqOcExFRYQDO9VXVtB6EZ/fHyWTbUz1ZbMtvPxaulwUa136/ozSrNkXwixYzvEO/NafPSd8yB8sMNZuvsj7O9NPwUV3Lt0B2Tm6BnoYGxprT2POyEmxpVgfPVp7KwIoAEnsBHvse0kSLm7omCjk9E2VxSTG0pSYzi9yOyR58TO7Nanj5V7Ne7ULEO8m9TffRs6bMDsNg5tP5qn70cy359VlExi5dCxZ1vZ+jur1VeSWDaFrapzHm/XjuwX5tWl3CjlZmqfdhJ1GPt0ZGmc4QxWrhR7jFJogo2w5FksRG2kZVg61jVgGIfuigJAcxs1DA8yWAMEEEj44u4BBNLMenxdKydyugBCkOUwQN5Zogce7Hi6VCjRhAsioEFEPBkl7UzMDH7OLK+CpPpuo911V++tdfLNop6Ur63pTQVwmK+twNgHJFBN1SWo3Z1N7ur2ik3L+3fRWqVJ1N/Tzc9gurnVI6uqP+/VfzP8Rvv4QYNvl9OPH799WK76V3lDlsWPbQ2l/xBoTqwk8emBOo80d8dDiDo9ABgYpv/JKRYKcPNXMGerj8rjO0DjFxAVy5VggFfpPd0FQqChCarwQuPXnY/qZEF+pNZKzIbqLpZIx7QKTZ/wEMmbxUbNYUfEsjyHcLI
Q5nDb3rO6GgcabR2cTgFBfkCwlkX55d9knki7qifl7HQg6p6B/YdWOAPCoz8L7yLxPOb2yvW26eOrwA+iJA2zdkvpnGhiH4kkIAjt9fKKfrrHGl4lrWFA7yzOcvKQhEC05ZIdsw8jjCCEEnrfvaC4N1cCB3UDy25SLhRXqpwglYxWUs9pzZ9OuVE8nea2stMiu5WeVPNb2bUkw5W1J0qQtYD0tkRRJOcsl4AUua6sCepJS+XDUwPShTTFpYIqpcYiDEHFnXpJRW1b6UAnuLxQfmfShrTKQ5UvTildvWhH5YVF5QuaXEtOC9ZVG+sCld3HSV43N/eIRGTCUo/LRGQs0foTQJqJyECQiAyy+uwjGXzRjtkKxDnW70zkZxYatu2Ystdryy3tCTKRsd7IH4b5+u++LhnB+3qSnIkMBcsOfqciN9S03hwJJ6C2yEq1NGR4fH75E5mkKMpZFvgxBeZz1HgZ8fEyRxnajbuOOjgldrR2puyL5+4jq4xxPnc97yI1CbSjbJxA5viE1pR9I8/Jr5XI5gkMpRFYlOLyjAiMkXhOZQR6tw3BVsh9sXSo5YfaJ5taJVaHZzDnu74IEEklPD/yJnC9y8X2QyuPK+27/fX6/u7uy4/rwF7+/Z/ufxUuAmwQl/mF6yOJwpJ7fJ8Kvn2Xd6u1Egtxq1kBMMvlOVVla0IiIhl1VovbjyGb8HfyZLthRQdo04W3SYu5DCaiZhcRbWXufhE9NT0F/kMLAV9OY6Yui6DhH/vO8a6DyE1CpkrwIK6e1eomHEnI7GqzgA4reoYrM7S8GCOBhm2Wzj6E3PN7/8NX7x2Z3bz/EoHZ/ccf2w87Sx3Y67VH6ZWQsb+/Rp8+3k0bhv5WkwEG2McAWZs9CBnAexTVIoZpct4wwbLz6Mb/0XMaCmRn32pnl4xiWn6yrZxcO6FLfy1LpLvsr3SqYXcb5Kthd5usnmL9LwdjBMQ47r0eWOvb83BR+fcL/Pfl8v5dtNxezOfL+7+1S2Htk90438SupwrQkdEc0GRWqnRloaCKtiykCykrQHq9YCnVmrczZy15YrMrVNsgWIVqmxT1yJ0gEvceVh+9fac1ocYJu/C9u7e9bD7P3Oza8+MXSJ+j2SK4hKv7b19CN4zJ2//XzJ0OylF7Ug2yCZUstXVCB0XEAJHakrozQ+6ngB5einw11yacffZy0KVlbzxLNdd8cauaG0VLbX+8C/+Zhh8360//bl69m28u4bS1YpwSpcVkKSuAELXd5cSCKVjdO1ZpMSFnROqLLdS1Z7PbxcYOZ84sKbjp5Uuvnorf1SaHv/2ujgpJxn7Ew/tmgJidtd4BGu3cevjx+Hr9efE+wtHrjf3m6jbsFQeqop4MrpBoqp7KIBDwwi6rWIqQqkJtlEeBVZWU6H21FFObwqkqpjZx6lO0GhAoSzMRnQ9/MDem3VUb6bhLb+Ou0L/7bF/i+dfZ6sPy9bfNXzef/5ot5MRSiaRmgqpSYGVx24sABIXTRrI0gJAHOzRAfSSId0xwGwnHwX8brqv4bxO9E4zdKoT1A3CMgtUHc3WuW/+3dD7/82NO2Y7lRRuFvKkB4aQuB49hQLKSHPTvE1YyFrJmJ7zH3puqK6rb0FpFdZsgdg83NCBvL0se6MQaEuii3sY16t772dXl1cd36NP25eLtFZrHgSGqaj5M3Y5cbnPJTepabbPCPXKropTvzupbhc48dKJl5f1a/xzNcQDVBpS6mfy+8u/eXC9+/j3ffn8TvnxvbvoDiqJH2ppTzFtO83BAAaaDa73pVJeTii6HnbDVfwUY4uNpY2K0B+GCpyxrcoLdktvE5JlmTg/kPOic+BpUerXCCQCnTJ7escELn/2UZ5PdbKPYWdGD6zCYOlHkDLoDgj5CZllupq3mDgj7J7yEq5Dyas4S2MDXtAUT+EsVrNj/hQwpxVfVjkVf+HtR0xhTBy1zlB12l+paUH14T8PkjC1BBzsaFreyUdDZcBG6kDlPcfQcGHn
huTLW5jSxrLEy4U84QUHRoXwPq140AFhgQg1b4eqKiiCPVU1U+BOhwOLlMdj49dyHjrXaMHqKzfSg1VRREPA59d0rmPEaz+C7k6zydi/dHmosAI1dnLV8NU5vm2/iDVU1rj9zp470Yrjl67OyuCtndccmWWI73rS+/UnjsnfFVEgAPww8EFQt3jMRQGscpJ5s5IO6BiMMfbzIynBUbi8LcKBX/wM=7VzZcqM4FP0aV808mEIS66PtJJ2edNKZTiZLv7gwyDYJi4PxNl8/wiwGScbYBieZSh4SEELL0dU5V1ciLdRzl98CYzK+9i3stKBoLVvorAUhUESR/IlSVkmKJoE4ZRTYVpK2Sbiz/8VJYvLiaGZbeFrIGPq+E9qTYqLpex42w0KaEQT+opht6DvFWifGCDMJd6bhsKmPthWOk1Sg6JsHl9gejZOqNajGD1wjzZz0ZDo2LH+RS0LnLdQLfD+Mr9xlDzsReikuBrQen+bPl9/0+V+i+OPxQtQe2nFhF/u8knUhwF54cNGDM3X+zRmt/JH0t3Gt9h8DX05eEeeGM0vwSvoarlIAR4E/m1RsQdLSOQ5CvOQNrzFIi90gSGwP+y4OgxXJl5hZOxuGxWbQ9NSmxrnxksTUWBJDGWWFbcAgFwkefGyur8eGY7zJ34Lx5e21sThHPmgjBpu/fNsjKT1nNg1xwCBFDGQSXc5c5yIwXHLZXYztEN9NDDNKX5ApRtLGoUvacQaix0nnNDFKT3sFozsW8dIBpBFnkT0JkL9VteP/45mPr5YDwMuLfw4crpEpDqm2OyAXo+gisI3XvukHOH1CKsoeMkDnEKwdpvSplLySGKSmCGLuBwDGOlUOprLYEKSAA+l2a+yYoR8QsKLu24QafxgD7Nz6Uzu0fY9kGfhh6Lu5DB3HHkUPQp8y2KbghhTaLLyIA6/SFLyQgZd0vkNSXtYMcNFSu6SFPamlnpUZJw1nhjP2rE6kbVGa45uvUdLSDp/IvSjIyd1z4e4sQkpMb1a5m1sc2KTbhJDitP3GaOrPAhPvNrXQCEY43D3LsVXQ41Ie0pJ3AuwYoT0vKjZvUJPSbskohBvjkUHRehBdRNzH5K28RNIF6TsKikFgClrbWNbFSmb3U7u/HHRvbnre0/DZ0OfuufzAI8oebHW6I+z1pzgg5hTdd7sZfQYbioxTLHtOJ7Ec2w9sb9R3DY+4PgGfcLeVT3IVqjg9MauSXBgjCAgxQ6SJukx+A5A+zpkZkGUeNQvy+hUx/X08l3AHlaVqIMRsohhuRK/eYDrJQTydGF4BVeVtFrmY3aHvhe3p2sGOqAjAybKVuAriwDBfI0fNs9qm70RkH2UJRoM/oKRFuSBpvAglfXMti39uys5GdG1vaVPszHQmZs4C7HzutTVyO0Jyxn2JM5OJ0w+Mxdr4yCOxzHj2Ic6a6U5k6a5srr4D3QEZCroi6ZoqqQpCKkSFCQGQchj7QU0VinMLKFAWpGocSIbFWOWyTaIM0+29yAQ9rStxqba2jsovKdTMjBtwKB+P7q7+hr9f3h4evUv5duDb5vP9Xo5rYtcfw3uVRCjQNCkKui6rQIJa9JslSZHrv9ZPklykef5sTtE4bEiQCNtGzA8R1zl4GLJ8BgQUP/Zmbp/gHK5dXmKWF2CbpNVTrfQ+1RJvsWM4Tt/F7gAHJ6hQiR8blpXUSapBzVapxo+nRE/8hbeuETZboxY/nk0sI8RJP8mf0CDVyc1WrVep+hg/7WNJbRkN75TahPxEQVJlluBqUl9ILTYgrY+V5VbfUdAHEVqISpR283baHH84JDPzyNXRD2t09XolX0Ebe9fW40I7H5lV1NgcG9PxRxHhLHaeSTDhZz0XREKMjcpcDT5ec7l48hYmLbWLksBGskgR1yuOtY68N4mUkUOeRMqspzqJiEBll5I1kQhtGkBWBA3qmx/tME5RtP3K3UIx7JKj+Fotc9w0x47lXXcfnOUldr8//FRee22JDbozdpezKc/
3oii7RWY9thJbzJklsbBgFQfVyEopTXjOP92E1dZ3q/wdHVjbhOiS0pIgXZsYiwhOGKcr267IT4SyfPssXMto6diZoIOixR5m+ECnZpRcMXS3XU2PMO3LZ+vSerpC53jWmb7d++3291UbctY4X9E9inaRXBzG9bZLLrr3jsE9/qBCjojCWDg/b8Cr1H6rKyiQoN6UgkJARzhSv/YAR5wJllR0xRvyfvnws8p4F9UkXkeY/1rbGGVfR+xHR7tOm/1opckZT8fzVXbvD/D2q+vwi/lIS5w5TVrwKUL2VND9sBYyVdD7AYa53jnmbQnU2Y4GkOpub+HnZueqbl3GzrKOtMbYmYptAHQgNyN1R0EfJUiinz5IYtm/bLjo9TuDwUX3uxy8/Fau2ipPJjyrfolQCxKBGjmytEUikMSRCJHrBh4vEf/8enJwe3nv3qD5Txe93N70v3NCUUQhPsmm7pdC/O8VopQZqiuECtTG/HeZInZ0qEIo8o6CPohCIOX0CvH7ZvH2hryf9tmP4exRu8Uv/963We+WMez3OfIrpSGcnLUpvGN/2WnB+l1/ltgbXGQVDv1uUdDSIdzr0O+Jofx/n2U7cliyzyvoyIMmCTLRpjR4pXLOaXDjXZKANF1rPNzF2TLabMa7qzTU1eBeURnweankSmDVxdT+e0Vywu4NKCWrOwJSqQGuLJZM0EwVBaqsDxDrAmXkgeeRMezgjm1EsX55evwOcT3zX6JXt5Iehbuz6S+xC/Tsu4R3mf68LePkgNUa3viYysdggLKZvZMBwN4UIEky+8VSXSc+JSZCDSUBHkgC5KdCaTW5zYhpep7AtoZ9JOpMqyoLWvGdbQ50gzvWZZpSumHtOPZkWsEzNKaT+BPIob2M9rO7Q9txevGClhSE4mMjTTokFB+JoqBLqpQdH2BPrECeU1nHZzm8/rBY98aY0AQU7w0y+gSy+rxz/QTeeSoD1KkNyK6DAA9nGQm0I1Ab1CzTxwuhmtZAjUGps/40A6Z+YiwhRzW3BAu/godfwcN39ofKmLe6O4Q0xIpFXSsi6ttcKCv0IqaqLyTBnUXV5ghtX8dtd4Pob16qekHNhxHZfab04MsXqX2R2qcgtaoHmupnMJp2ANIOXs7J9FliTlkHf7dMbjf/4CTOvvk/Mej8Pw==3Vpbc+MmFP41nmkf7EFCF+vRl03abjKTJu1usy8eLGGZRhJahG/59QsWsq6R7cT2ZqMHGQ7oAN+5Au7AUbi+Ziie31IPBx0deOsOHHd0XYMWED+SskkpfUtPCT4jnuqUEx7IM1ZE9Z2/IB5OSh05pQEncZno0ijCLi/REGN0Ve42o0F51Bj5uEZ4cFFQp34lHp8rqmY5ecMfmPhzNXRft9OGEGWd1UqSOfLoqkCCnzpwxCjlaSlcj3AgwctwYSE01/xzPEbdb/dX5te/ovnf3ZTZ1TGf7JbAcMRPy1rJcomChcJLrZVvMgDFsmNZXITBwOWUdeBwiRknAuIbNMXBHU0IJzQSXaaUcxoWOgwC4ssGTmNBnfMwEBVNFA9ckVq5ZIfXBXmqFV5jGmLONqKLas1kpZTVUNVVLnmoSPOCzDMdR0rX/B3fHE9RUJA2wxv3n7Uv4/Dzw5zNnw1jZsJHqiRShtcKxKjDqSj4ssAIepq4lOGsRQy0a6wJ4wIQav0KhmYPFB5NM2qY2g2YmufCVKthKtAYCEqA0VKgeAXaUKtq5k5lceQNpLuRtIC6T5K0Jvw/UQc9U9UeS7WxRAxklU2hcocZEavFTNGOk1VCF8zF+62WI+Zjvl/9sFdykXXJF0SZCZ/hAHGyLDvRJlkqbneUiKXtlMhwykpUU4d0jeqroteqMDLtPYxSEGqMtqq1W+JB2kb+vA4gmbvDGfzXZxtI1l9umix4pHcGQx9HkwQzoU6yPhzu7JrltptSPLKskurGP2Ek8ichikQ0Ys2e4CX+oldpiMt7DEMry0gzejbQYR84pnhrOjBrHkMzzSaX0TO
3n4Ds/XYX0ijUugvZI5n34psNaJaQto2e5Zi2Zuh9+QZ6HWjQ6JsvBHRTelGwigJ61veFzKOGAgjeRalzTp36jOetGfZaT0+bURBMQhxOMUvEBLWXbOI0Y8K0OR1vknDEF3JU/XhLPCYKvT52tMWEYuxos5G9sUNpJujpwIE19fso4UQIBW0K3WLZIXl5wnplHBu0TqvaPbPa3PbSCVS+zmZDZ7MEnyX8wbMZsEraBNgThlbKy8rc7QPa0qF5WG5LFqy78nPZknEqW6oyeh+2ZFzelKJV7JPp/xGiS+8x+g4Wj2TWknQkMYreZEvGttlCodxlR9MkLqeHKf9yLHxxzBmNeDfZHuPIITU9Xouf7Zdgitwnn9FF5HVdGsijANmF+dPfdKMve+kCGiA0LC+b4Pf6jNMU+qQzrA2huJCMgNztIUU+Jql2fa9IDd/3DNs2JuXZ/io+vNF8j/bh9U3PiRy4Xdlude1K+n6oA7fE9gEahmk6um3bsG+U3blT3MUBx4Lv0btrzuXde2zeru6XT/1bi30bP5ozsJ4MGt17ReGlucXHqW9ta7g7KEfTjC3otClifXPecCYK9V7Dfny3d3zLvrDNmApg/cNQlMwoCwX5fpsLjrbbOzmVCGf5YQXQwtH0FUOhKA5Xc8LxQ4y2Nr9iqHLunC1a0/qyIVsslE5RBdrxNso2+ZlWue/fw/9MwJs24hUwBRcSJwdgiJI4vaiZkTX2ZJghQTBKY4hgBNNz4nMgmLXavfIxiFn3tdlFQBFe/VzoGsendh81zRJBPJDz/1UTrfQWQZ0piYnCi2czbTZSzGba7Lx0M9DE7+fdDGh22XY1U+YZheeVyYyu6RWvYBk9p/DY9kH5y6nygWOu/k50xnwa7yqeCpDQ6Rlgd8oMnfoF4MVOmRuXWA9uN+lVIBgFi0Tewp0ucXBALW94Y0bXanS22RTHXnG3Kqr5PxVSHc//7wE//QA=7Vpbk9o2FP41zLQPMLLkC35c2EvSSdptNp1k+8III4xnbYvK4rL59ZUsGV9kWEjNLsnUD2Adybp8Ouc7R5ceGifbO4aXi490RuIeBLNtD133ILSQC8SflDwrydCFShCyaKYLlYKH6BvRQv1duIpmJKsV5JTGPFrWhQFNUxLwmgwzRjf1YnMa11td4pAYgocAx6b0SzTjCy21XL/MeEeicKGbHkJPZSS4KKxHki3wjG4qInTTQ2NGKVdvyXZMYglegcsD+vj16wfnYYG//Jb9dkf5iP3dV5XdnvLJbgiMpPy7q/40dpb9hP3pbMIxeRx9A2Mw7ltI1b3G8UoD9okkdE2E7HehC3rk/LmAU4CwlK+rJL5lOBGvo80i4uRhiQMp3wgtErIFT2KRsmS2Rt0HUl4gjWTqyHHp8a8J42RbmVU9zjtCE8LZsyiyKWfYc/S0LSqTiyx7YGvd0noV7r4usRMvGr4ToPQNJD8znGZzyhIh/hSlYQ+OZcOx7ExKNhOWyzoD2LKGBsI4jsJUJGIy5+cAXOfahbVrkrBsna5MCEJw4JhTYrnnmxJgTMkhuK8CTpkASQ47EgTyAU9JfE+ziEdUYjilnNOkUuBKg8tpY0bOBTOso9wGcovSFwzeObxWC7yu1O7RVLyE8oVF+GkSUEaKHNHQLtOYjFeA0Bo2MHQGoPJYlm1g6rVg6pwLU2hgKtC4EhKmaNkbTeaMJpMgXmWcsJ4nitxah5BsautOjUk6u5IOVspiGjxJ0TbiX0UaCEtVqcda6lqiCIrEcyVxT1gkEBA9UrLT5i+jKxaQA7BouDlmIeEvqySZ1YKCg46iUAhGYsyjdT1saJtfXds9jcTQSgr064plqIgao/6q6qcbFTneCxUpEIyKcnXbDfH7NdCMCHpj2LsahSSdZIQJdZLp0Whn66y0ZyWZReumyCSE3PtNEpyK+Iu1s8O++kWpWhOvzyKmvxt4AKIh8B3xa1m+QSKW0+L7HGFZ+Reg+D0Tq9gnMLUOSy6Drm3k1ID
27IHrO55lw6H4Rb5rAg1a6fqVgHb2Ap1rbAU995+VXDiMBBC8r8M0yfN5pLbLLbC3BlBl4zieJCSZEpYp4m83iW7aRCpbtTfJOOYr2So83RBPcUIdu45i0fmS67CPdB1aM8EAAh8Z6vezeBMxKfi5UmwpC2T7Owwb7XjgYLeaxQurLW1PdaDxddEbOp9n5Czezz2bAes4ToA9YbhY/N2Cn9KW0Mm25CL4arZkd2VLzYouw5bsCzElb68pZUuc/idbsvNsFydy4Z1Os2U9OlT1133h3jbnNOX9LN+3lE1acLkVf/mXYIqDp5DRVTrrBzSWuwP5Uiyc/gLtoSyV7+gIDSvfHfCr2WMVQXfaQ6MJXUtUCHCQ71uUbUbNopeK1Oiye3hoXVLv7Q/N4f6pHO6ci8C9xmqr7zXC92MJ3BXLB2TbjuNDz/PQ0K7TuV9dxAHfRZfI7uIZIL/yDF+d7ed3Nzd/cGsyT669tR/cvM/6H1v2WtWWNxgvcBqSmWENl32SUGhMuWld4I+cAazib6r9niXofrU/dsnZCry5CwsPeMc3J/f/3WAHbrAbx1IcDgXCaOTGbeeepmXX9hB3nLJccIZnWy7YDcIFdhvhnux8hk693uHhajvyNnbjvAMicMB9vPy5C/Rp3ZsvLyzT4xiGIaqJltkRrgVnS3X5YB5tha8SRh7F8VhZsKgIqdOhc2502g03U5z1VRQctrgV2IFbaYd3/+nemzKjVawM8ZronVAxPvSDxeBHn2fBS6JGZ9hQUl+GzJXne+NyaERZgnSrLOV5PwQ7gkOfvyFVwr22fOHnP2b8bXnyrK1yBHR0/N39EdB99Ncjf38/cr0P8N3USx8e8VXLQmiM41jSnwFgsQRKKT/CRylivbbsFu5SV1Aat30EEQZiLj/LvOu+e07v1biZYnn+wAZV+zUmqe1WRRfOrHVO2nxZM1QoST+lqZyOGc4WMhbIwa/Mg5TfYy5C5jSXQIAk2JzRp90VQ/hG4fUhjTzlUkTb5TmnGyfiNnZzGmFwk+mP9iFuvdq+7Qzcqm+CR3mQlylXJMs7n6p4eXMW3fwL7V1bd5u4Fv41WeucB3uBJG6PSXsy7bSdSdvp6cx58SI2STy1jYudJumvP8IGG6QNCJCE4kkf0gRjGe+9tS+f9uUMv1o+/pKE67sP8SxanCFr9niGX58hZGPXov+lV56yK64f7K/cJvNZdu144fP8Z5RdzN54ez+fRZvSjds4Xmzn6/LFabxaRdNt6VqYJPFD+babeFH+1HV4G3EXPk/DBX/163y2vcuu2m5wfOFNNL+9yz7aR97+hWWY35x9k81dOIsfCpfwf87wqySOt/vflo+vokVKvZwuv47ejl5P4m+fP7xef3v37d3ltYdG+8Uu27zl8BWSaLXtvPSn8y/bjx/8qz/+uP/y4/rLxSRefBylb0jX/hEu7jOCZV92+5RT8DaJ79eCj5A96o8o2UaPEH/D63zZIwmp8EXxMtomT/S+x7L4ZGI38oKMLQ9HJhI/u+muwD87l9gwE5zbw9pH4tBfMvq0YIPbTCoqIuv01/vl4ny6jZMzfJGSYk7F8X14HS2u4s18O49X9JbreLuNl4Ubzhfz2/SFbbymV++2S/pwr236K0/6elaytBelMbY4CmOAwDLo++fv6+WHL4n/ZXIxu7qOF/b2549MfEv0fYXOzi9uo9VkEyX0e6V/X1B6uAv6LBfXCf3tdruj0P7KbP6DvXSdX0jm4bfJNE6iSTJf3U6W4Ypu/CS/jT7wNfdWbn16V+kjGO7r4BmyGKaRsWch7FuBQ3/adsDx0HYcnomONXZ277Dyn4qYavNMrecMzBEDKB2QsRs4nk2Qn/60EE9pC9gu2ijNa3JKnvPUAkfbyfIpI+6lVUdMVhcdlFS0mp2nxji9toin39JLj/Ptn/Rv+v2yv/4q/fU6paOV//FU+OMqSub0O9Pdt7/WkoWb+D6ZRs2Kehsm9Ks3K5xoVvIgKgXCGvM
szy1QEi3C7fxH2eeAWJytfhXP6Xc9amK/LGsH1Zwvsf/O2buKRp5Z6KCdqxbaE6VmofzG+OZmE5Xu2UnlgSzdBRVXqoSdci2Ipvv9PnWuLuiO3Y7CvUymEr2IbrbHV3MtgXYvuuEytZ+r6036XxJt1vFqM6dex2S+mtGdjy5Re3X+z9oXdtt9QWzbP9mtQfkbPhVuW6c3bKofmPkY16p9Kubu3IAft9z+41Vu0r8+eh+j3x//2Py9uV1ePvjo9uevkDPG2u3pXbi5M8ZcEwSY6+I/TkAd0Fr3t84gPav9oIIKQuPMZK+owtOlrzK9+nqnVHspnTplUlQ6dQInqnRG1th1LaJM6zho7DDqIvW0C/9QRzWE266s12TD3ObF95KKJ73ydi+k1g0NdpH1LnrixLIQE18m4ZL+evFwN99Gn9fhTnAekpAJeHN2pqE8fSHfnqiDTRTQH1oABfghCaAUGOoZAb64CABfIPWpjlaQPaoWNP3gC6mgvSiNUTAmVtFc8RRXBcaAXwiwVycPxrTmIQDG2LYZYAz8DSudEM1gTG9KB+l2KYAxmKe0LjAG/IZ8jHtqYEzGwqL/V6e4i/5fncIxEYyxqLAxq3QOOoG1DPDvIEdEHiRjUYGn3uJ8GtGw+dI+NfhF9k5AbXeCVvhlmM3QE4RxDh8kisMgwrxDCxQDyoPTbLWVQjGtjTUO2BA3ODJaAwADfg3o9LYGgIlnOpRVR+xFVOMgQY3jtNM4A2AvPZQOgLcYaYMR7zMeMZa97cxQlt+ocHISOAzMIq4bhoMOkEDihhEwC3EAmMXVSitbAJIaFGdxK4hfTWSP2f7YGlulf7wi04u0APb95KGW1mxEFsdHczJfYL5Wexya0RYJxA6cMQoMxltsjyP2qQEuGRcbnb5chTcjLi3dPqWQCytwPY75G5cywNnLA3ZViMvu7HgSP6xSVX96GS/y94Lbdi+oBV0M2A6tIRf2g5oyX9j7bUpVzGy2gSCX3JkYDHNpbbIJe0DijHkbrRVwyfNxTgJxEVY4QI5drYSZgrlwyXEH+WmtcUjjUgYYYMRHXRnaskdXMqglS28xAmsR1wnD4QcI8mtMxFqQD2AtgV5a8THLVRLdRJQk01QC3883W0NEz69gQ73o6SUnT8064ulHrsRpOHh5FvgFgJ198jBVa54ZXJ4FfkMBP1cPRtWb0kEwDoxGqHh3+NQQqoyHjU5ypqkbfeRsPSPwqdOsz4IltaFWoS82taZOzmLn2xyBqT7K/J+1KVrGjS/FWRKLs7KgcXh4CjWbbaXwVGtrTbgsjIM1HgqfshvqUBl8anM/nUabTZxshtBbHWErUeUTCCqfXPBMQa34TCHUVQHxaULsUgaYZpd3IjkJGwRecZyyqhy5QKhDPCDUsT1VGxwb3rDlwExhLVoWUMIrUK2AAAYQl1NHBNrzzA4YrhmOCGAe5ynEqTuvYnI9Xz3rSDXnYqNdxKLHOfleaDSMCoygx9itrhbQqV+nwvzJMm1Y9Mh1J3xK3Nr+m9u2AuDY1dPp1hLIRZAYvRc7qqxz6/hPiOQVKI0OsTxBnHidhh5pG8kzn+LVR/LM3TYmzEYbKJIn1Viblki+vcojNh/JQ0pPayyf74oCGQ+B+857nK+mSbRMabHLMJGjgqQrDtEwPJeaIbwNLk4eeRbYaae13uAEq2llxR6Jw3skWYrIVUiFZG/4dDZAwR2sVcsje70RustvW456ZsAZBPNxGtabWkMgvNIkOANVUF84wWHgQhwC1D6dPJ7RmmlchoNvNp5B+ES/08Mzck3Q5GHkOqTZwyDDeRjMmWKew9/3SJFdR7H3QATKv1TjGb03N3RIpxfNgLL6ZKAZhL542kUyCpRC+yIZB2iTfRp6om/mAal9KFMzD9zKRpCfdulrasMh0j0cEleFw3n4rqn9ILlwCLm8mwdXGsiIHcF+sQKh46DRUN+GkBpPd0H6Au7LyQdDvdsSGnS4CzK1tkr+RGIhoDlSnQJ
pbEwt6vTo6L0mx8NRd7L756unX/zw8b3jn3/7/fur6M3Nb0S4lbzCQKj3xrZtfYEQSETFOdnU/0rC1ES2OsmV8QA4f4Bksut6T31Bus4JhmM99FLdtjIzGNOsqnoGYw2Hx2wwhvUHY6AEQC6wzrPj1lqVbVIAaVVV58YgBfkjFYOOjQGFUacIGhUGGs6RwWyvjxEbF4qfEjcupRjYdXmhUVq2bXUHPbp1Z1QWx8On7AIp7ZoC+VYgSjVYwjW2GXkOHwhqPdp0TnucgxzGeQ7AODJGgefagUXoT58vpBv2cNPhXYDnHNDX8bHxGMMB7GDtZhjAEHqAIfQ65kv5gCVk11KdIVVdQqcgtFe2y13fHzpr24Ggf0VZ2ycwB1OTosCCikJLiG2I7mgdagMfhYLap3PzgvXDBnWxKZ0BHT6dSFXQLUffBZ6QttOaru3wh0uDxN391Iho2kQuMgP4Gy5TnzTyXb9rXz+PFSRgLdX+Bn9+pWMMpV2OwJUGj+xJuwcMroXHVHrW2Ou/X18/vv3+v/98Hd1+D37+/fXD9P7j+WpETO3qh8vUcjweT3RtkFhBeeSqhAANppxAV7oKytVzojvpjKENUD/+S2q0rSvq/oW7Zz2/ek9/vo+phl9I3dKOCKhWz9FWoFo1pVURV2DLaoLV+pIRzo9xGCnN87w1QGwwwVu10gvXi/4OoizCemXKumkSi51BXoGVp84VCAs19pThLMJfiAeICxjXeq8r9gS9xHUENCV4rWVb0e2s3dhFt7NWIAdwOwljl7GFx4wVEfU6Ha4LKreUPKcT5gyPndtjFmWVDLJKFxqgshReUbTu49hz0SG8hypLjpisApJObusoR27jUpIAD8SoUz8Y42INq1/7nOy7EfNuz2NkXD0OAgsKAnYF4lr5P/ftMFzoboTsS5MWYB72eJ/55YbL1INcXW/WhdPAzTpcnUGo90282o4285/RXtRstE7D/t07retw+i0Nplaz0TRepP7sDlC6vf4XSuMmSgT69BZK9VX+u2P9m8fM90ejBf9s/zTl88p2T8h9RLbKPL8QTvf15IfPnLO3SnkOBZS6qH7C5777W9c7ofxAVr0+wJY0fQAsZaQttIlf8/YBbeFpF45LCjMJG8CPrWKpBBC/a8utqLX9PbgKsDD1jSYP4XZ6V8VCA1jjOmO3iK3wSs1WdV5Uq4gbkcKbhBoRZF3MV2EiFS/0yrNTHJ3cyIs/S/SHIBjCqXFpHODBrT0DSmTfl4DKIzoWAWnlnLswgIUNHCQ44Dgbtk+5vGJQc7rXSjraYgJbIPdba06kW91aQBVgK4eQrLAim9cPqhBamJK1LWP3yCypo5QpyGyvfABXtO2jO1yvWIeZzUfY3SUaQrioYSHFgELV0eH5ahVvw200U22bPH22iUXTof1uQykByvwxYL9niDiH5rygOy/ojs6sLBeYo1OnQQZQwj6xxkB+Re56Uie++GrHDM8Ap/PQjv9afYjeSRcge2woz5KROBNSjjDQIetgHEuRgqXK/fKh2vBhIoV6VnZuDKkvtQMmMAIIPEBqRwdCsic/1hgX973NEVZr4OADx0KlwGESrlbPJXioZVmj3co3cfNQRDSY4XKY8m3kd03qYLsFsAspjh6AWZwNqLsxOxqz9idNgyuMPeV3tLaxp/A31NfgXhKFudOxlMKFM6eAb1OsFQyvGf5XOOKhAdkzmVwnrDdrt3Kjv58LYovRdb7l8ryWpEsRO23DI2Nic5a5rWbF7PCN+mVNcPSh1BAbmhSrKt7sabiRqACKNuyRL2wjBnunbuEYBW5A3Whv99PtJm1sY5yAChvizw1Nkjb+qF4zcOh3rjxoYcq0BKNwcg1knQyM2wkC4nbIjhd610iPfzzDizQOzGzRN6/sOx1oN1Tsnlc1Dx27tyclW5ZBEEBLveF6bSXGMVx/vlUYOZeap7UDVRi18jdEuG6zUTanyYQD9qBxKcUhOzrtYbOSdAY0Zt3wNDsE2UA
ZnU/4Ip1jKySVAa90VSOaroxEj7SO6cqcLMiKbtlpBx4NpgtRqN9NDRH2GMtFgmpIJMZondzsc1/SqX189g3YKb+hnMEsTXPyHlBeqfNyWv9yWv9MlBsWRU+Oyi0gytA7z6s7wh8RH4+t4ul7R3XHJgITUa/LSHVnW5YOfYf5KKUB4pGfLu47XUEeGT6fx/t4yAJ9PEU+HeYPfapYoKC3h9DQ1L7E1wQbgY8JNgs0EGPzci+wSKx8gpSeLHoPNZNqSITtwEpxWMgu0VjjmCCYwAItQHXgax0IGTCUHBhd8/hdXdnn5PkmxOR8avYMs43ajLCJdulUkRBTFqGDwLRPiGlYSHV9fiuUXGFCTPttjD1mG5MxKSTEcHt62HQYsNu9VLSrfGauN9dDug4AWmzWyq94dGjbuSGVrxYwY6O7zrpwsFtax7GYbnEGHKhDkwV05ne1VxjusYNUUWUUe+xwgqE1wQto6n96CV6QDqjd2o06QLit/zHBy3UtZRARtjkxc2xQzlrneAVtVzZBT/BxQm0Jv/R6vc6ITAsNoyXOBR/ShfwKAyEB3wcgAQJgUwohAYHKoiEhgQMrxTOWy8pgaEQAalwwACLQno62z6pVjpJ6EYHacb0FRGAyzRpQPl9cIOdWc0zg8P5ArRwO0buPOafujguwWeKacQHhaRdSBvxI2sbY5dx7t3gYx7crVTXqB6apgH5UGTF1sC8+O8KFkrRE0YFLYoC5EIXwSOdEEWHdVrvdmmOd4XQbYkeKOHBNaeuohh0v0rCuas2nPC0NGrnNRuhyPvAw4zuZrKlTqnzyl3STD3TWqd0/4jCg5bo82iNr6i1hstWCtCXDoeoGdSy6wUy2SdqvP68JNCngr2oZp61nX/c8gBYmUksc+5sz+/n58cvr5Pztm79/brzVjf1mxAOvxZZ8CvIqyoOIUQDSU0k7vsOxSdHBAOuaAn/s96c3XP0gkHXxrBryMb3AgIIovV02BGINI/vx2WxnQ6qPC/NTHN451lu1w8cbXHe+Z1Gx06svVL57m/trDNjyn8maRy5mtZkwcMBubn4pxbP6/OomcRrHpvSTGVGoyW9feaFyagrbwMLjjGJXMQKWktUpnlGixC9VjGTRX2WqMTs20aZRT/HtliHjY3PNP9DYFE3bYbiaRyNkX5qwIEBYXqamvFTqmLz586xaM4amcB6VNHUALGWkKbQtr+btw1lC/7QLuiXhIEzd/SgtGQ12YWUaXPq8D6mtmLvW8ps8M0UNZxB7IoYB1ugM+4N6fFAZ6or8EkqodGgHwwOfQOOhSiwgAAvkjE35ak1/fTN5+vDGXT69+/Lpkzdd/xfAaDlSm5CVlTaI42jlgGVtyhpH8eifFnEVmjFTy9tWZwSaiSqQFUhXma83AiQLN2sqYfSPm/ljNEtd5fli8WrvB9OFsrNaFfTLi67ZJAS4iXmp6pUnNlJFa4FEeMnwtRyysvA1Jk1k1Tr+G4iByz2npOe+tSN0Y1JHRpjm/pBnQ4EmI5edMNK1YmXksGch7EqK0zaqssxNq/2Xs3UxC1E1bl29fQCAitaXOTEveJYJWhlot1+nUFoUFamrK2TbwXQd3hU0rNOYPfQIfgEpWBHIVN4H4KRqkDCKc0ltG5hqCPcHUVYKhAQGR1ZQq5b43ak1IC0qo0p9/kBQ9gdajI49cFJCOxplJLYFxE1HjNmXgvmreMwkIgNNbPTGlfrnlMqiJWNoiDNG9fic3siyuheDruKW9jTFbFdovkZNazULEoA9VFaztKegE7D90li5dHiaai1nyTWooeUsOckbfex8gzV3rh0O+8DMTKsRy1XhtHy/YSHVDbKrG3GdcMGKnM/Dx8/bk2+3qVR+Ijl+4oq+pe4Dzdzbtuic1VwJtKjJcXzeM5C13dkWhaVBOMhlFhRvTM2E1LjnJByFETbMpdrmc9loBx1Qe0+pFK0Us4cb/ztiD1/ZyKErjINlGRz6ZxKnCut4O43U7j7Esyi94/8
=7V1Zd6M4Fv41PmfmwRy0IXjMUqnu6a7qrmWqquclh9jEcZdtPJhs8+tHshGLJDBgsSQnqTqJjbGAq6vv7lcTdLF+eh/527sP4TxYTaA9f5qgywmEkEKb/eFHng9HAIX4cGQRLefJsezAl+X/guRg8sXF/XIe7AonxmG4ipfb4sFZuNkEs7hwzI+i8LF42m24Kl516y8C5cCXmb9Sj35fzuO75ChwvOyDX4Ll4i65tAvp4YO1L05OnmR358/Dx9wh9G6CLqIwjA+v1k8XwYpTT9Al+PYJx+/Bx7OHy7/hx+lquf7uTQ+DXTX5SvoIUbCJzQ6NyGHsB391nxAsedj4WVBwEYX325q3kNzqQxDFwZNufv0bMWxGQsZ8QbgO4uiZnSe+5QqyJ4yHk7eP2SRiNzl2l5s/IBjPTxhnkY6dEYe9SOjThFaOhlbOil33/Ia9WPAX0dL/eT0Lo+A6Wm4W4mN2ufQMhbx38ZrdxiVIPztK5HTSZCqXU5MUiUmQRRxCAYYu+408x1GIC2wNcYltEY9A5Nrid1e0pg1oPRoyY1siM7aAY2c/rkJlqiVyV0R1FaICixHkbE+pkK3xK1BFN06IJUPWs9VysWHHbsI4Dtfsg2AzP+NQzY+twtnP5jTehffRLKix+GI/WgSVIyacE8wLckGds9wsCByJgpUfLx+KkkM3Dclof4ZL9nTpdEObLSviZT/S6jg8ZPKlPFKLcZ6Kt1Nz2ANJlGH3LJI+7wlc4ylc88dm9byXxdF+RtiFgzkn3x7yJP5hUnPLX27CmP05f7xbxsGXrb+f7Eemc7BjORbbHfQHgDXsxjkUnfvJu1Vwy9f5jg3FLvuVf3Y5dbpc3YhAixRmBhDLhbmJEarDMRhFXa1wISSrxHlusW7CDZ+Tub+7C+bJDOQmgx//04/jINrsjzBG5BSPo/BnqkvBLta61+FaJxWI23b9CwahMoPAEgZpiggIUmlkDgkUin+eDZ1eQQELRa+K0VYrpuTXWPT+bnvQ/G+XT5wPz2+Xq9VFuGLwwgdCB+HZ5dIu0tZxLTePt8qihhoegp2taahK7YPMnt0Fs5/XKf5eqVMwEuEtuOX4gnZqLuhk3qa2ZUOiKq+nCnQxvEeVdWdL81x3CU8dWx4LEmIhG7rAIYhih9ZbwtnI4sTw9nYXdLPMkcJ7F5zp2KGvgu1K5P39enUV+esay1/Mm8eXeLqkUDcLfjjzEePjkNmvqS20GNvKyyiPEGU9OV4q2Qp6TGemClb9El8YULEjn6uUzOZMR0lbpkvnsxHT9U9JjdfiAk7OzhfB5noXROz2+fvz89S+jjIbutS7sQh3u+W2I8O7AWWFn9KWUVph4XRh92JqY9V/IQxtxsaJc6h3gS0Ie1RgE1xTYOOGApvJa4xUfDEkr6Gsf08hljRw1E56A1sd2q4eegziW+fwgXs+dPw1R8TNzW6bW+nMmt4UGNL57z13b5/fhpt4ejDOORMDuH1if/bftG/82U8uuDbz6eygtPNTosXNPyC/ATa17O5tiL3sNbH/mY2dIscelnKYcribIhY1u0PlEskoS3HAn8XLcJO75lI+1ch9dECp8/I7HCem4LoePIKaYgoiDuoKUxBQvD7MqmcWt2sDGyHiAbclpmDFXQAAGxkAl4j/5iwCNrv+c+6MLf/GruKxsXxziVpY6qJQvsCeRgK1w02YhTjVO9lUvZkvH2poPFxkX6/9jb8IIr3eUzY+O6twiQH0I6y4LhGTxHYayAGIQlVjIhpFtbcIENG5Mo9gL6NGPE0cxBxZ9z5iBT3TmAdbS9eR/yi0Mfu1AqpYI028KsBVGcIQomIFKdLgblMIJYrGpwx1FDOftI/RnVJGNK7TV2+QQQdbgGaRUKCiTa/2GYHN0WU0OvCyvQIcbWdVyu7gz77nesk0sYGFkjGDWbiZLVciu4KHq18nZAuQ6DOKLRQ5JGOqQ7WLt7G+q2jSRwbuOIZFXoZzu4W2pxgWnuoP0/shuwP
cGt7vwQKGzUmssDJWnVq9RgmJ6jD/Gvmb3W0YrSfCa86x1+d4eZkiqfBFGmN0Zr4qnC7lbHTH+Mq0MGWzgDC2mqGhzcHyOpun8iTCk80alBg2et/eWB1Zby4/Ay6/MdxhlQPkJShnpG6WgsDaAZQz4VNLxY7jWg4iHuJZgsizxcM21c0g9SyEMSEepJQiFxevYnMUzbxF/appulTgl6s5YBGbFBLK845Yxv3qEWrI6LPQFYxqCQV9GHZKcoKVzBvvqDtCR/XO8jaJxos9CkwfQD4z9F/x+39lEnqkMo/WlXnuYDKPuPLyxcCIQ8JxGg5sTtJ9uUCf/pr955f5+x/ff0x/+32Lp79NVehV2GaQWizqFEWWQ1V3AtFZUcCEO+H227c1hVdnOPyKPzj/fqDXtxdTNTj0hXvrTWWHqRSunLBGGWDdUUp7j+VhtDRCMLvzd3c3y7xN1DJ6cCKVUv9VcVW6POaY+1GldVdlVNrnOT1+M+aIs5kpBBJkkGK4GWjcZb2Fm7VPCI8vE9O1nYYWi0RpnmJnp6Wd2IYqofuq7NQ+oOp0V6jWhzVnhvhSjbLD8/lz2oNa79mVLad9HNXlnuagxj2koFaROK+YVrFJXi+tAuMB1NI00yBNoy+OUL+2q3qcjpVOXQOAnJQ6PcuGM1tWowTKZJSZa0JRGcU0mqqrjZLnNf7HKllVPz8SeCL81kG4GMuSvt0ywLB6nJJl0DivUXK/0SNZjdLpSQBfm9LYUWa3lgXUKFKaJDGLAj8OJp3W8vfC5nZTNocAq+G9NzZvwebAHgmfA51ePgqfiCcBA/GsvI2qqn6O1u53DCh/n7frxY939lcHu2e/f5rfbq6jR41B8zm4DSIuFk3X0LlqxL8mZKTT26yGrl9KaiyW127vN5gX8SmS8suxhVFm8ENNhKUve187qbp0pCHs/eaUxnKtI6M0zgx+olbC9GbwayldbmCcnPBSEmxJDI/bKNgbAXbPepCY0rwiVAUseUWoileHSDfARV4DjseWdebhaFn5JDfcODKsOQtYS15NqW5ZkeRb0SR/8SozqF4MROgHrOsHy4wlr7uaSSaJCuub6cbERoDadP9LOCOa205ygSFPWkKUJLCBJRFmypaSu/p5FsA5sEKVNy1/G0Bs5XzENix8vRdLS8tpoEZ3y8StNhJFyAHFLLW9ypkjrNq3qar3l3G9B5S3sDxV8SkBMSv1uQrVRzWWR4Jrmv4SlVzZpHTRgWpw0RCwUVBczJArLjksaNnwUXYmHBm2384S+nnRdJYQNVp75jMeXzpaQ3AaQ0KNV7JKIjdgSM/GqjfIVIcyubMvj/6i9AcwFIRuO6YEclyK50ahzMMl+tKNiCsF/UYZXG7hU5FcjJ5Fc/SnA0eXgUrsIXKwWjhQlICrrdPIhssvqpEiMQqft6BjvqpQ11MPyz0TzXW0ViilFr3JTlNeAMdtS3a2vQlE9wd7Qi8muQrjefE4kz37RuMnc7WgFYRqQWhLEYdK5rfSQdTzRGncra/eh15/XlIfuixxXcsFmYvW1XTpH7ZHiwaoBnGiNyY1UbsVUWrhXOKcJmAx6I4I5eb5qTkz+PDx/Xbux8E1u1uf3SAysVK6sDCTqT7eamGi6PNVPNyk2ZhGQTCVZSA7i3j5Msxajcktgms73KWYEXAZssBcpuKAjcZk0EtqV+qmKkC7woFmqgZRo+tm8it44CzbvGXPXlbtv7wbDy5KujFEHkPFHLypqIiHREVNfghgwH7wRHD6HlBtgGwo40DV3MXvOAj2h1TIcr3Mx4+lEdsiFcStkOp0FGKPYyt7rtQGJaei9WGfHWB1TujxNBszBVpS7gkvqcYgZ9CroNVrszGds9RyDiA1oefMYp0zsKGX18z6vI92bFGmFO7fg28cturu0ZLBVpe9B+XkGacaXmqHJt1m444TthAs6F4DwlZ5PfzYLUvXkTsfciM+22vPVYNTwxqWQuMropOIJPJcjwduFa5vgmg3Zj0
K1C1lF8zVoH4CO53VTzhE5pdiIrHXDpCoLBOrhzWER44jAwrn4pxvvTpXQv46b9NR/vXh4EkTZ+JLBva6w0NPqwXWbfyQ2+Ghux2ZZPmNzayWtH+8GJZq+XbkywWRkS4X1Ui/2m8dGfizu1TGsBcHKaMsmRFsIWlIOZA7aSAH6dh3sP0jYY3QYx/7R56GbQKyjmMbrIlt/ewf6cg1FC53vuUi/m3zJmXvrzSscKL11NBNbCCv2Yb4LeH6LeH6Zbg1YGO3BkaaXdaN1WnIQRFX5wdoUajRbNyRujXskehBuni5LF0HS09rnq4gTRLSTtJgnSyhpkY+FxiKeaLQmC00wS3Ht3dprsSYssaknerbFsUTpVtULVjpde2qgZSLtFFlib1iPtfRRJ6LmrRHNTlE8jY65palGgzpaGfXOjsuGHP6KvtEQ61j4rjXtyuyqx71gTe76IzDHZXD+6W1UD/eKkrfDJzRqRW1DZzGUZIuE+OIbBG4LVUNR9Za5YE6M2GapbW5I0kgQWqo45ALzyX27d6Le9hRhrd5Czer51Ix8vK9txgqfeRtqetPuYVf8N8CC0iRCnPCByoTpkzJ+F24gu3MZcX15cKV8yfb4pTbDqeMrXs1ElBSODNNtZhE7tlJQlNl5U0GF5oBRLVN1QBqpY46DoOc3O28WlxS03iO2h5er/qwrtnSy4MkTS+O0zLehoEkRA1BUtq4tqdOtaoFO0i9aXMLVcrwOZR1KWuy3xpTnYmK3kzUNxN1nCZqFSAM4Ph2sCFMpfDIQB1jqrZJjcQVg9SaA7kSCWv2sKU64wqYcF0/fwjf0x/+5dMf88f4Gtxtfz37qNn4Y7it0apns1EpeM9U1NinY69IqU/Wkm6olGfc5H40GnlXFSnaOdCleQ2SXt+YslhCXsoTErLsejpgdr2W0jrTx+j2CqkdfL1hMJNEtHvYYiF83ATR9SacJ8WVXV4SyY+alSN0/LATes5m8IJM6GXSXX91v4v5g3NM7/zB2eUddnk3vfzhwa93sR/f9/D4VCJ891f0cikaszt/s9jvGlJ60X4VWKoqsFUiLq/AVsFwk+Z2sLvceihXokDPyvWb8kQNTOMNbuRSlCPj9pvtoZ0YNVFLRtshMofqsp9Tk/1gTfbroI+iLQftp60jetQ9PpY5K6qK4OPM62usYhWJ6WHLydeU9JfVpyW1aq8e3H/2hRAWxgyxE7JYGtNcLuVhBsMRqvdrMOgq2L2ypP1xuL3eHIQvzkFYV8BpIjFVYDGAgEPAkIMQeQM7CFUDdpR5gy0Q17WonM2hoKyX6hb9hGU0iqdFuRnIHoatVHr5BrdvcDsCuK3Eiib2LBaRUJH6ryxBUxmEUsMq3LpYwT0ykKEMQiJ19gZ2dQahfD7uP4Nw+rf37td/nX94wN8Wz+cz+vPb6lG7Ab3E54NEmDCwJFMHawSAAzIxUUjhk+VwGxnw6SPcfL7y6PcguHm8suHMPv+iizINauFUTmqzfft6pyVUee/0WNNs5S/XPl89p8ZETiSt+NRRanywbTk561EAc9581FntJvQa7Tyo8SZQ5uftSgRW0TovAbX376kSsIrfGvRKQXZnOfNyBjrfnd5h0gpQ4LL5p1LIq678m8odrqYAFMObIgV5GIeudmLKw3CdBTzNLG5iyznnYJ9znjWq1jBQbyFPLa0NbPg4Y1TcN3Cvu/NRaiHBCx4w41nhFwBKYbNCwEyF+k7vDu3DeOym8qFEIUeyMGKfd4VFu6o4J9KGIVDSsXe5mUXBmn350BuZrZLd3sYb5qaS4CeDriTeXKM3end342WzJdokXZGBmJk7ty6AezQo3vdt7Zc+TW9ryxSDTL8w2rzw6FYdJ6kbsKa60dzeRsD1itKkK+1jSpEsuaaQYgsRgjHTeqhHcduGRlNXKfqd8lb5Tj6qbEwHaVz6BxR9nKleuWawIpxT6vNVB0BOxQCTU3vea5lLE9R8RVu2GDK9pP2Rkc3nKadjDbhji3Z
SdXsdg5K8/3aAnThMx+92bvMAA3ilR0Lnzp3W45CoDZudMokKkEf7kqiuIlEPxmAmF3DL/uVTrHQwmQJSPbapSnmNuHQsGzQVkR6tLSL79DyomROfg1tm2PDGp4Y73rjupHN3rnCeE8UtQY/7HLVuCTO18FVuu6HzsgzpGpI7jxCmM1QV+XaVl6WPWujCPIYS3fGLTjNaDnZ70XZWpYkMTjquUyTup9Kkeh1Fu0kCFwpIomANpoLU3fG884ABbJuzBORd0rvLWdJjUZ1toGv3G1D4gv2Nnn8kn+7f/MXe2BYljjhwySlhp++e8+/+DKIle0I+1OHg0zL+kXtdGIy9z8bib55zb+SRDDOspgVL1XkjaXcAHFlOtuRiReDKm+O25mL2Ngo5GmanM3Xj7kM4D/gZ/wc=7V1bd5u6Ev41Weuch3ghCTA8JnaTnF5O0yZN2/3ihTG22cHgAk6c/PojcTEgyYAxAu91nIfW5iLBJ83MN5oZ+QKNVttb31gvv3gzy7mA0mx7gcYXEAKkSvg/cuQtPqKpMD6w8O1ZclF24MF+t5KDyX2LjT2zgsKFoec5ob0uHjQ917XMsHDM8H3vtXjZ3HOKva6NhcUceDANhz36056Fy+QoUPXsxJ1lL5ZJ1xocxidWRnpx8ibB0ph5r7lD6MMFGvmeF8afVtuR5RDwUlxg+OHG+Gt6e3On2a/f/ki/fi3Vy7ixm0Nu2b2Cb7lh46aff7nj+dMLcr9N0ZdbTVXvHvXLZCxfDGeT4JW8a/iWArjwvc265hMkT/pi+aG15Q2vMU2bzRDEU8/yVlbov+HrkruGWgJ6MuvUdBq+ZmOopgOzzA0fUJKDRjJvFru2M2zwhwQePlQ3Y3n07cHU7rW3S8PaPi3ev94n6Oah+rGeGaGFj3233QX5zwrs9+gjBSGeOWvycbNybnxjhT9evy7t0HpYGyY5/opFDx9bhiv8RGNATievCZBETqQvGH1jx6J0aOmxYDHvD1PAYMpgh1vBqqIGZEawjvXH3N5auNPrue04I8/x/KghJEV/IvBLzqLilAWAnbI8dKEocDmyPYIXV9cLy50Elo9fjHy/xnioDn6W66mPPy3CCKHkSHrAt43nien51sR0DHtlYOCSS/CTTbPbqLHLjY4o0GnUdYUBHfBQFzalEYM6fv2ryJ68Ta3JJtIZEz9SEzdKGWQEBBsbsivHXrj42NQLQ2+FT1ju7IpYRnLM8czng+ENvI1vWtVyGRr+wgqrp5g1K9jgUg2TKnbfcozQfilaad5oJK3dezaZdem4A6k47pe7A2kb8Usmt+XtIt2SVtVSDAPTUjQ9di/ZfMbIHCW4V/7iiXMasqcASvbkgSwpQyBDjfwLACuKElcUB4quQKRJ6b+CRFPZC/TMfimgp/7ZEFZ3jYEIL41Y/ogMO9Y8zM6m2INBIuKZcrwBuUGKWi9q1mM7hLkO8YyYrKzV1PID3BkU2zGKT9vBxCdsBxsEb7V2LEKDRL+zHJ+eb8LNThBE96nkdbcx9fwweW/xXasJ0q7pWyssudEbT7B0Brbnlg30SVoUWNOiyP/PFiVrKb3Qm88D61irwx04lqcwM6cXz08pYocgy6IVxLEieguEDm4fN97a+Li4m0Bp+uzdubc/OVbjznBnDvH7PuLRatnfkxr7e2jPCJTKjjAoH70XAE1gXH0Yhr+2Dz/9qf7OcaEZ6Hpz9+rDt9MXhYkqKwMMXPYHmWkryvm7ffrpf8boXoH77Vfv4fefcTDlQt0HqTwYVhkWcYX6QNczUimh/kglF2jeEkarpLIzage7JrEJl8QGLuGvsUd8AqwGsayGaxtUltWUSWMPrEaWKS5CK/e6pEaBFQ215yWXQf2PWs063KboFPthyY+w1Swu6hqDemE1axkxocnfKQ+6YQ1654Jaxjzyglo2y3oQVDSklo+beh+yUtFQe4IaBK78SZ6/X33472/4ON3KVx+/XYKT8yxSJgEGQ42Rp6HMo2VIG6S
PfYxM8QGCDEDfkzWFJJLUmjuh1fEmykfxIHdCKJhbc/ztb+NNfX+Z/Xh5lZ9nD68rHs89JbNwLLZplINam5AJu83/yd3ZCe4w1Ajk5WbxlRkSN21nED4bU8u59wI7JEtbOcNAW4zQoya6twkd27VGu+QBMVOc7/IhpXIUeO61KmoQWL3SElOPTyYLvl0svKauiLda2cQRkcR6BdbWDn+RyUPWe6NvvwvfxttkZkVf3nJf7i3fxoNn+c2mXp63lMlVnreUqcEeeItCORgQsNq+LnVRad+f01Z77OWTubwPw1v05e3T98fJzVR/2f5owZ5whGHPkssE2xhjQdxenpnZ1359ERCmB2WNMUeStFtrSflN3hgpCs8Ytb9Ewx1T1jjtFlfwVJr4xmuq1KQyOI9zX47TAxz/pWz6nvXAsV4M6/gyU2OPE3OYjB0cH6FlD0gZNvnFZoXDPACEA1FeDas4H33DDeaev9r5NSPSMVFiY9dKZU66GJLDWKSwWiwjjUdmzkESM0gkNyIdtYVU2zN0pVLVOf6skhsl7ElKvcvWsNUbu5X1sUzPUmpFRQN9mKPcnHQvfjhAEOws5465q2qsCHjuNFjnbHewNlwuLZ57bngZjVJklgBcb4lokDulqWE+E13jzi7NOL4VsfHF9F9YGZCrIrmCsp59VqR/s6Q6JjI5GhE/TZFdHPaETBdJK3Z6wDAjly7r0+bQmeOfQwBS1/ufsB2KkCojE0sM8R8OFKE8ZeBeh1jKUKY4KilDIpCY6slDlZG6tlZBKWmXB6jgYTfjE0wQRFEGqsQGa/tJzygbvb5D5e0ocaQU4NeIu5D7Y/MERYXKua/DRpZGhuMQXbLXZLpeWAP3WEuNgbx3JYsiJFjNmJgUPZJzY/xcAs0qtZKlD2QpJ2acZfIuR2RYY/JnCtX1XDIYMyNYktkdQZ8bBXL83gixjnWjI1BCBOrQ9553FUHwhPRxgxAUj2sq7ShkeuUZezLlurOuTqYTvS6B1kgptxa2Ygk0TFYJzlzuzOV6X+4pc7rrczdZ1YeiuBudISLLR6wA0QEvTlt7FAIeG+Mtd9maXBCUUE5KEalK2tO+h6PvAKpO3YI/xI8hkjd61+urR8Pb/rGQf6uN72TLWNRJsTyJgk6UZrrk5yHgkQxZFMuALFYjb7XexCWdba+Z1MvsLR3TgxagugbzREo5jwSQT4+BjnWCluPHrP/bqceSYnuqWQ8tDQKVDLcrq+giy4GPe0Vxpxmrj6xEiJWB1ghGGcaVvgjkxJVLZ1olw3grCg6HXRywCESnmg6pJmqv+9DOC92QYJ8C9lDa2Y7syXQ24vB00vD5UAsv7nQ3qwkGOoySlILOSjy77hb1UGUp9/OqSj/dJoWdhuNMvFc3Lu0Q3OUwrSWdWdu4U/F9avHptW/NHTsIxdev6Ek5ibm0ZhsntcKTMApDR2+MxD4ANn67gpY1NuIkxSdPBYT2DXqQWwC7n8oA9dCnnPWZlWOJ7lQ5qNPT5JmwLs+sW/fdCs9MG0ENeSWdgkO30x6tvHq6/ihPP//5j/Pkrj4F6C/v9586tZ3m0giWp0Imh/R6mUZYYW6lnyWTZRGNY7gjF09xJZwXw2vcOHbF4QhP8JFyMRyndLKbTSTwA2BwR9g6ZD27HtmQr9uuYYddF1rJPY1KnoYMBTYhB6MhiJd1PCbabvZtTNMKAk98XTF3NHT6vWs8QXZvg8mU3Yya3wykvcNVnabNb++wV/ln2HyZtfllZqzTpSU6RgWalkPTASqmofY4wPyH/zJ8Ak/fb98/QGR8GT08uXUKrnJxntrxm7ohof2pPlTcTmFHTVU5xQAItGDduSW9LFJjD38f7Va7Wwvx1EqLLR3Nw/bqFIbj9TMwjeW3319nf//wXq2vq7fPDi8p55RiDkfiuifdGIt1VeSn24J8TuSHhqmLMFtLaFNlHlJvYTYu1rwcTMGr9+3gqtBb0SIyi7PVe3YOd7V2z8V5f5TkH7qFTte
7MXa3ZY+crXDmOhX8lkrTnRBbXa/vbFCHWWli3Cf+LzS6W77ubjaBruUzXbA2HcvwO0b3JHa7KrMvec+ujAFWbqKT2M26WYnMdpkyZ9ujBq6eWqw62JPDc7DjJx/UbGUpSfq0RQjoFWU2YfDQNEc4LHYBgVL6QPT1QFEpQ5/LcGzg1XJnjs4wAYF7bXbhru0jZDXcClFljHwnmY0ppBHcc/b7Ofv9JM2MfhJmBlJmhnFH6xoWJFc0dKApEb+FNOu3ncwv4rSktiVqSJRqtS0qcskdATbpLU2DOWvts9buW2uXKY2etTaiSpKVplpbBhUNtVSldCx9T79XFCilCS6Fe6udkQYWhTuJeDQ0FqKZl19wEvi7SXWFoGx1Pi8EZcLSx69cUPLEyE7t+l21oiHBAdDDdpntLgBKF7rxwtYahyOgNraF5CIFu0Bq/xiVz2phUHCNT41dvXqLnsE9+NeOngG9r1+g4GLNruWcfrz48DGgatSg1OFOvFzY95vQXY3ayg4nQWgs8KwlmVciq9TKUM5b0zJpraSUdRcChNaoyQ3Xk5kaNbqh9uwof750uXFcvSLoI0WzE/PC39+hh3q/dvSYQrMWIJ12vR8QXu+XVu50ENnuLuKaxLL9uDTJmL2J73JIl7EQ2xNuush27q1nvYcaJamHPhNRSYvdzKXhLrrIY0/j+N13nP7+6S4X5cWMyE8Pv8d5FLfSWW5VatLqrtdd0v7vZTt0S6F5dtP1OlTRUFu7CtH9QL30uejrASoJt1dlD9AUUtw2lYDdPLH3X8dsiRExOyCwG2d1u/sIYDOwRWWy9wk0RL0DzfnVs8F5W+dzWO2U7XTdbUMF22mqfgI1ra+ifwaWaehE7DRMPON6dhrQmKPODDW7HJumj5912lmnnaZOqxsmFazTaJGniUf9XIGKhk5Fp6mguU4DInwPfrIAo9F6375TECPm7NkrjBFzoYYM1Gkp0Nl4nI1H38ajTDv0bDso5ag03XFIr2jnRCwHgrC55ZCHXVmOGoW9LW2g1I5B0OhdKwDoawMlLp4CC3jTaNwmySwXXqaX1vDOfYtMAOEViU02pDluVyadbIEkHbIFUt+GoDQyUWkJ6pYjtpoeQmVHIkQpt6Y/XcU01Dg9BH/1PTI7sssx+11+wZOBXPE/
\ No newline at end of file
diff --git a/src/chash.erl b/src/chash.erl
index ec42297f2..c7f1a343b 100644
--- a/src/chash.erl
+++ b/src/chash.erl
@@ -36,6 +36,7 @@
-module(chash).
+
-export([contains_name/2, fresh/2, lookup/2, key_of/1,
members/1, merge_rings/2, next_index/2, nodes/1,
predecessors/2, predecessors/3, ring_increment/1,
diff --git a/src/chashbin.erl b/src/chashbin.erl
index 6404ae0fd..1e8d1e47d 100644
--- a/src/chashbin.erl
+++ b/src/chashbin.erl
@@ -23,6 +23,7 @@
%% -------------------------------------------------------------------
-module(chashbin).
+
-export([create/1, to_chash/1, to_list/1,
to_list_filter/2, responsible_index/2,
responsible_position/2, index_owner/2,
@@ -164,6 +165,16 @@ iterator(HashKey, CHBin) ->
Pos = responsible_position(HashKey, CHBin),
#iterator{pos = Pos, start = Pos, chbin = CHBin}.
+%% @doc Return iterator pointing to the given index
+-spec exact_iterator(Index :: index() | <<_:160>>,
+ CHBin :: chashbin()) -> iterator().
+
+exact_iterator(<>, CHBin) ->
+ exact_iterator(Idx, CHBin);
+exact_iterator(Idx, CHBin) ->
+ Pos = index_position(Idx, CHBin),
+ #iterator{pos = Pos, start = Pos, chbin = CHBin}.
+
%% @doc Return the `{Index, Owner}' pair pointed to by the iterator
-spec itr_value(iterator()) -> {index(), node()}.
@@ -231,7 +242,8 @@ itr_next_while(Pred, Itr) ->
%% Internal functions
%% ===================================================================
-%% Convert list of {Index, Owner} pairs into `chashbin' binary representation
+%% @private
+%% @doc Convert list of {Index, Owner} pairs into `chashbin' binary representation
-spec create_bin([{index(), node()}],
[{node(), pos_integer()}], binary()) -> owners_bin().
@@ -241,15 +253,12 @@ create_bin([{Idx, Owner} | Owners], Nodes, Bin) ->
Bin2 = <>,
create_bin(Owners, Nodes, Bin2).
-%% Convert ring index into ring position
+%% @private
+%% @doc Convert ring index into ring position
+-spec index_position(Index :: index() | <<_:160>>,
+ CHBin :: chashbin()) -> integer().
+
index_position(<>, CHBin) ->
index_position(Idx, CHBin);
index_position(Idx, #chashbin{size = Size}) ->
Inc = chash:ring_increment(Size), Idx div Inc rem Size.
-
-%% Return iterator pointing to the given index
-exact_iterator(<>, CHBin) ->
- exact_iterator(Idx, CHBin);
-exact_iterator(Idx, CHBin) ->
- Pos = index_position(Idx, CHBin),
- #iterator{pos = Pos, start = Pos, chbin = CHBin}.
diff --git a/src/gen_nb_server.erl b/src/gen_nb_server.erl
index 1e1395fd7..93b3e7edb 100644
--- a/src/gen_nb_server.erl
+++ b/src/gen_nb_server.erl
@@ -90,13 +90,12 @@
{stop, Reason :: term(),
NewState :: term()}.
-%% @spec start_link(Module, IpAddr, Port, InitParams) -> Result
-%% Module = atom()
-%% IpAddr = string()
-%% Port = integer()
-%% InitParams = [any()]
-%% Result = {ok, pid()} | {error, any()}
-%% @doc Start server listening on IpAddr:Port
+%% @doc Start server listening on `IpAddr:Port'.
+-spec start_link(Module :: atom(), IpAddr :: string(),
+ Port :: integer(), INitParams :: [any()]) -> {ok,
+ pid()} |
+ {error, any()}.
+
start_link(Module, IpAddr, Port, InitParams) ->
gen_server:start_link(?MODULE,
[Module, IpAddr, Port, InitParams], []).
diff --git a/src/riak_core.erl b/src/riak_core.erl
index 1ee62dbfb..be9591828 100644
--- a/src/riak_core.erl
+++ b/src/riak_core.erl
@@ -21,6 +21,7 @@
%% -------------------------------------------------------------------
-module(riak_core).
+
-export([stop/0, stop/1, join/1, join/4, staged_join/1,
remove/1, down/1, leave/0, remove_from_cluster/1]).
@@ -43,12 +44,19 @@
-define(WAIT_POLL_INTERVAL, 100).
-%% @spec stop() -> ok
-%% @doc Stop the riak application and the calling process.
+%% @doc Stop the riak core lite application and the calling process.
+-spec stop() -> ok.
+
stop() -> stop("riak stop requested").
-ifdef(TEST).
+%% @doc Stop the riak core lite application with a given reason without halting
+%% the node for testing purposes.
+%% @param Reason Reason to be logged on stop.
+%% @returns `ok'.
+-spec stop(Reason :: term()) -> ok.
+
stop(Reason) ->
logger:notice("~p", [Reason]),
% if we're in test mode, we don't want to halt the node, so instead
@@ -57,6 +65,11 @@ stop(Reason) ->
-else.
+%% @doc Stop the riak core lite application with a given reason.
+%% @param Reason Reason to be logged on stop.
+%% @returns `ok'.
+-spec stop(Reason :: term()) -> ok.
+
stop(Reason) ->
% we never do an application:stop because that makes it very hard
% to really halt the runtime, which is what we need here.
@@ -65,34 +78,111 @@ stop(Reason) ->
-endif.
-%%
-%% @doc Join the ring found on the specified remote node
-%%
+%% @doc Join the ring found on the specified remote node.
+%% @param Node Remote node owning the ring to join.
+%% @returns `ok' on successful join, `{error, Reason}' otherwise.
+-spec join(Node :: node()) -> ok |
+ {error,
+ self_join | not_reachable |
+ unable_to_get_join_ring | node_still_starting |
+ not_single_node | different_ring_sizes}.
+
join(Node) -> join(Node, false).
%% @doc Join the remote cluster without automatically claiming ring
%% ownership. Used to stage a join in the newer plan/commit
-%% approach to cluster administration. See {@link riak_core_claimant}
+%% approach to cluster administration.
+%% @param Node Remote node with the ring to join.
+%% @returns `ok' on successful join, `{error, Reason}' otherwise.
+%% @see riak_core_claimant.
+-spec staged_join(Node :: node()) -> ok |
+ {error,
+ self_join | not_reachable |
+ unable_to_get_join_ring |
+ node_still_starting | not_single_node |
+ different_ring_sizes}.
+
staged_join(Node) -> join(Node, false).
+%% @doc Like {@link join/1} with a flag indicating automatic claiming of ring
+%% ownership.
+%% @param Auto Boolean indicating if the node automatically claims ring
+%% ownership.
+-spec join(NodeStr :: atom() | string(),
+ Auto :: boolean()) -> ok |
+ {error,
+ self_join | not_reachable |
+ unable_to_get_join_ring |
+ node_still_starting | not_single_node |
+ different_ring_sizes}.
+
join(NodeStr, Auto) when is_list(NodeStr) ->
join(riak_core_util:str_to_node(NodeStr), Auto);
join(Node, Auto) when is_atom(Node) ->
join(node(), Node, Auto).
+%% @doc Like {@link join/2} with the joining node as an additional parameter.
+%% Check if a self-join is happening, and assures the joining node is the
+%% local node.
+%% @param JoiningNode Node that joins the cluster.
+-spec join(JoiningNode :: node(), JoinedNode :: node(),
+ Auto :: boolean()) -> ok |
+ {error,
+ self_join | not_reachable |
+ unable_to_get_join_ring |
+ node_still_starting | not_single_node |
+ different_ring_sizes}.
+
join(Node, Node, _) -> {error, self_join};
join(_, Node, Auto) -> join(node(), Node, false, Auto).
+%% @doc Like {@link join/3} with a flag to mark a rejoin. Check if the remote
+%% node is reachable.
+%% @param Rejoin Boolean to mark if this is a rejoin.
+-spec join(JoiningNode :: node(), JoinedNode :: node(),
+ Rejoin :: boolean(), Auto :: boolean()) -> ok |
+ {error,
+ not_reachable |
+ unable_to_get_join_ring |
+ node_still_starting |
+ not_single_node |
+ different_ring_sizes}.
+
join(_, Node, Rejoin, Auto) ->
case net_adm:ping(Node) of
pang -> {error, not_reachable};
pong -> standard_join(Node, Rejoin, Auto)
end.
+%% @private
+%% @doc Retrieve the remote ring via RPC.
+%% @param Node Remote node which got the ring.
+%% @returns The remote ring or `badrpc, rpc_process_down' if the rpc fails.
+%% @see riak_core_util:safe_rpc/4.
+-spec get_other_ring(Node :: node()) -> {ok,
+ riak_core_ring:riak_core_ring()} |
+ {badrpc, rpc_process_down}.
+
get_other_ring(Node) ->
riak_core_util:safe_rpc(Node, riak_core_ring_manager,
get_raw_ring, []).
+%% @private
+%% @doc Join the ring of the given node locally and distribute the new ring.
+%% @param Node Remote node which got the ring.
+%% @param Rejoin Boolean indicating if this is a rejoin.
+%% @param Auto Boolean indicating if this node automatically claims ring
+%% ownership.
+%% @returns `ok' on successful join, `{error, Reason}' otherwise.
+-spec standard_join(Node :: node(), Rejoin :: boolean(),
+ Auto :: boolean()) -> ok |
+ {error,
+ not_reachable |
+ unable_to_get_join_ring |
+ node_still_starting |
+ not_single_node |
+ different_ring_sizes}.
+
standard_join(Node, Rejoin, Auto) when is_atom(Node) ->
case net_adm:ping(Node) of
pong ->
@@ -103,14 +193,30 @@ standard_join(Node, Rejoin, Auto) when is_atom(Node) ->
pang -> {error, not_reachable}
end.
-%% `init:get_status/0' will return a 2-tuple reflecting the init
+%% @private
+%% @doc `init:get_status/0' will return a 2-tuple reflecting the init
%% status on this node; the first element is one of `starting',
%% `started', or `stopping'. We only want to allow join actions if all
%% applications have finished starting to avoid ring status race
%% conditions.
+-spec init_complete(Status :: {init:internal_status(),
+ term()}) -> boolean().
+
init_complete({started, _}) -> true;
init_complete(_) -> false.
+%% @private
+%% @doc Like {@link standard_join/3} with the remote ring already as a
+%% parameter.
+%% @param Ring Ring retrieved from the remote node.
+-spec standard_join(Node :: node(),
+ Ring :: riak_core_ring:riak_core_ring(),
+ Rejoin :: boolean(), Auto :: boolean()) -> ok |
+ {error,
+ node_still_starting |
+ not_single_node |
+ different_ring_sizes}.
+
standard_join(Node, Ring, Rejoin, Auto) ->
{ok, MyRing} = riak_core_ring_manager:get_raw_ring(),
InitComplete = init_complete(init:get_status()),
@@ -133,11 +239,28 @@ standard_join(Node, Ring, Rejoin, Auto) ->
riak_core_gossip:send_ring(Node, node())
end.
+%% @private
+%% @doc Set the Status of the node to autojoin if the `Auto'-flag is `true'.
+%% @param Auto Boolean indicating if this node is auto-joining.
+%% @param Node Node that is joining.
+%% @param Ring Ring the node is joining.
+%% @returns The updated ring.
+-spec maybe_auto_join(Auto :: boolean(), Node :: node(),
+ Ring ::
+ riak_core_ring:riak_core_ring()) -> riak_core_ring:riak_core_ring().
+
maybe_auto_join(false, _Node, Ring) -> Ring;
maybe_auto_join(true, Node, Ring) ->
riak_core_ring:update_member_meta(Node, Ring, Node,
'$autojoin', true).
+%% @doc Remove a node from the cluster and cause all owned partitions to be
+%% redistributed.
+%% @param Node Node to be removed.
+%% @returns `ok' if the removal was successful or `{error, Reason}' otherwise.
+-spec remove(Node :: node()) -> ok |
+ {error, not_member | only_member}.
+
remove(Node) ->
{ok, Ring} = riak_core_ring_manager:get_raw_ring(),
case {riak_core_ring:all_members(Ring),
@@ -148,6 +271,13 @@ remove(Node) ->
_ -> standard_remove(Node)
end.
+%% @private
+%% @doc Remove the given node from the cluster and redistribute all partitions
+%% owned by this node.
+%% @param Node Node that is to be removed.
+%% @returns `ok'.
+-spec standard_remove(Node :: node()) -> ok.
+
standard_remove(Node) ->
riak_core_ring_manager:ring_trans(fun (Ring2, _) ->
Ring3 =
@@ -162,6 +292,12 @@ standard_remove(Node) ->
[]),
ok.
+%% @doc Mark a downed node as downed on the ring.
+%% @param Node Node that is down.
+%% @returns `ok' if the transition was successful, `{error, Reason}' otherwise.
+-spec down(Node :: node()) -> ok |
+ {error, is_up | not_member | only_member}.
+
down(Node) ->
{ok, Ring} = riak_core_ring_manager:get_raw_ring(),
case net_adm:ping(Node) of
@@ -188,6 +324,11 @@ down(Node) ->
end
end.
+%% @doc Leave the cluster with the local node.
+%% @returns `ok' if the leave was successful, `{error, Reason}' otherwise.
+-spec leave() -> ok |
+ {error, not_member | only_member | already_leaving}.
+
leave() ->
Node = node(),
{ok, Ring} = riak_core_ring_manager:get_raw_ring(),
@@ -200,6 +341,12 @@ leave() ->
{_, _} -> {error, already_leaving}
end.
+%% @private
+%% @doc Mark a node as leaving to be removed in the future.
+%% @param Node Leaving node.
+%% @returns `ok'.
+-spec standard_leave(Node :: node()) -> ok.
+
standard_leave(Node) ->
riak_core_ring_manager:ring_trans(fun (Ring2, _) ->
Ring3 =
@@ -211,25 +358,47 @@ standard_leave(Node) ->
[]),
ok.
-%% @spec remove_from_cluster(ExitingNode :: atom()) -> term()
%% @doc Cause all partitions owned by ExitingNode to be taken over
%% by other nodes.
+%% @param ExitingNode Exiting node.
+%% @returns `ok' if the removal was successful or `{error, Reason}' otherwise.
+-spec remove_from_cluster(ExitingNode :: atom()) -> ok |
+ {error,
+ not_member | only_member}.
+
remove_from_cluster(ExitingNode)
when is_atom(ExitingNode) ->
remove(ExitingNode).
+%% @doc Retrieve list of all vnode modules.
+%% @returns List of tuple containing app name and vnode modules registered with
+%% the application.
+-spec vnode_modules() -> [{atom(), module()}].
+
vnode_modules() ->
case application:get_env(riak_core, vnode_modules) of
undefined -> [];
{ok, Mods} -> Mods
end.
+%% @doc Retrieve list of all stat modules.
+%% @returns List of tuple containing application name and stat module name
+%% registered with the application.
+-spec stat_mods() -> [{atom(), module()}].
+
+%% TODO Are stats still used?
stat_mods() ->
case application:get_env(riak_core, stat_mods) of
undefined -> [];
{ok, Mods} -> Mods
end.
+%% @doc Find the health-check module for a given app name.
+%% @param App Name of the application the health-check module should be returned
+%% for.
+%% @returns Module name of the health-check module or `undefined'.
+-spec health_check(App :: atom()) -> mfa() | undefined.
+
health_check(App) ->
case application:get_env(riak_core, health_checks) of
undefined -> undefined;
@@ -240,8 +409,12 @@ health_check(App) ->
end
end.
-%% Get the application name if not supplied, first by get_application
-%% then by searching by module name
+%% @private
+%% @doc Get the application name if not supplied, first by get_application
+%% then by searching by module name.
+-spec get_app(App :: atom(),
+ Module :: module()) -> atom().
+
get_app(undefined, Module) ->
{ok, App} = case application:get_application(self()) of
{ok, AppName} -> {ok, AppName};
@@ -251,9 +424,18 @@ get_app(undefined, Module) ->
get_app(App, _Module) -> App.
%% @doc Register a riak_core application.
+%% @param Props List of properties for the app.
+%% @returns `ok'.
+-spec register(Props :: [term()]) -> ok.
+
register(Props) -> register(undefined, Props).
%% @doc Register a named riak_core application.
+%% @param App Name of the application.
+%% @param Props List of application properties.
+%% @returns `ok'.
+-spec register(App :: atom(), Props :: [term()]) -> ok.
+
register(_App, []) ->
%% Once the app is registered, do a no-op ring trans
%% to ensure the new fixups are run against
@@ -274,6 +456,14 @@ register(App, [{health_check, HealthMFA} | T]) ->
health_checks),
register(App, T).
+%% @doc Register a module in a role for an application.
+%% @param App APplication name.
+%% @param Module Module to register.
+%% @param Type Role of the module.
+%% @returns `ok'.
+-spec register_mod(App :: atom(), Module :: module(),
+ Type :: atom()) -> ok.
+
register_mod(App, Module, Type) when is_atom(Type) ->
case Type of
vnode_modules ->
@@ -287,6 +477,14 @@ register_mod(App, Module, Type) when is_atom(Type) ->
lists:usort([{App, Module} | Mods]))
end.
+%% @doc Register metadata for an application.
+%% @param App Name of the application.
+%% @param Value Value of the metadata.
+%% @param Type Type of the metadata.
+%% @returns `ok'.
+-spec register_metadata(App :: atom(), Value :: term(),
+ Type :: atom()) -> ok.
+
register_metadata(App, Value, Type) ->
case application:get_env(riak_core, Type) of
undefined ->
@@ -296,39 +494,45 @@ register_metadata(App, Value, Type) ->
lists:usort([{App, Value} | Values]))
end.
-%% @spec add_guarded_event_handler(HandlerMod, Handler, Args) -> AddResult
-%% HandlerMod = module()
-%% Handler = module() | {module(), term()}
-%% Args = list()
-%% AddResult = ok | {error, Reason::term()}
+%% @doc Adds an event handler to a gen_event instance.
+%% @param HandlerMod Module acting as ???.
+%% @param Handler Module acting as the event handler.
+%% @param Args Arguments for the handler initialization.
+%% @returns `ok' if the adding was successful, `{error, Reason}' otherwise.
+%% @see add_guarded_event_handler/4.
+-spec add_guarded_event_handler(HandlerMod :: module(),
+ Handler :: module() | {module(), term()},
+ Args :: [term()]) -> ok |
+ {error, Reason :: term()}.
+
add_guarded_event_handler(HandlerMod, Handler, Args) ->
add_guarded_event_handler(HandlerMod, Handler, Args,
undefined).
-%% @spec add_guarded_event_handler(HandlerMod, Handler, Args, ExitFun) -> AddResult
-%% HandlerMod = module()
-%% Handler = module() | {module(), term()}
-%% Args = list()
-%% ExitFun = fun(Handler, Reason::term())
-%% AddResult = ok | {error, Reason::term()}
-%%
%% @doc Add a "guarded" event handler to a gen_event instance.
%% A guarded handler is implemented as a supervised gen_server
%% (riak_core_eventhandler_guard) that adds a supervised handler in its
%% init() callback and exits when the handler crashes so it can be
%% restarted by the supervisor.
+%% @param HandlerMod
+%% @param Handler
+%% @param Args
+%% @param ExitFun
+%% @returns `ok' if the adding was successful, `{error, Reason}' otherwise.
+-spec add_guarded_event_handler(HandlerMod :: module(),
+ Handler :: module() | {module(), term()},
+ Args :: [term()],
+ ExitFun :: fun((module() | {module(), term()},
+ term()) -> any()) |
+ undefined) -> ok |
+ {error,
+ Reason :: term()}.
+
add_guarded_event_handler(HandlerMod, Handler, Args,
ExitFun) ->
riak_core_eventhandler_sup:start_guarded_handler(HandlerMod,
Handler, Args, ExitFun).
-%% @spec delete_guarded_event_handler(HandlerMod, Handler, Args) -> Result
-%% HandlerMod = module()
-%% Handler = module() | {module(), term()}
-%% Args = term()
-%% Result = term() | {error, module_not_found} | {'EXIT', Reason}
-%% Reason = term()
-%%
%% @doc Delete a guarded event handler from a gen_event instance.
%%
%% Args is an arbitrary term which is passed as one of the arguments to
@@ -338,14 +542,34 @@ add_guarded_event_handler(HandlerMod, Handler, Args,
%% specified event handler is not installed, the function returns
%% {error,module_not_found}. If the callback function fails with Reason,
%% the function returns {'EXIT',Reason}.
+-spec delete_guarded_event_handler(HandlerMod ::
+ module(),
+ Handler :: module() | {module(), term()},
+ Args :: term()) -> term().
+
delete_guarded_event_handler(HandlerMod, Handler,
Args) ->
riak_core_eventhandler_sup:stop_guarded_handler(HandlerMod,
Handler, Args).
+%% @private
+%% @doc Find the name of the application the given module is registered for.
+%% @param Mod Name of the module.
+%% @returns `{ok, App}' when the app is found, `{ok, undefined}' otherwise.
+-spec app_for_module(Mod :: module()) -> {ok, atom()}.
+
app_for_module(Mod) ->
app_for_module(application:which_applications(), Mod).
+%% @private
+%% @doc Find the name of the application from the list of applications the given
+%% module is registered for.
+%% @param Apps List of application names to search in.
+%% @param Mod Name of module to search for.
+%% @returns `{ok, App}' when the app is found, `{ok, undefined}' otherwise.
+-spec app_for_module(Apps :: [atom()],
+ Mod :: module()) -> {ok, atom()}.
+
app_for_module([], _Mod) -> {ok, undefined};
app_for_module([{App, _, _} | T], Mod) ->
{ok, Mods} = application:get_key(App, modules),
@@ -354,9 +578,20 @@ app_for_module([{App, _, _} | T], Mod) ->
false -> app_for_module(T, Mod)
end.
+%% @doc Only returns when the given application is registered and periodically
+%% logs state.
+%% @param App Name of the application to wait for.
+%% @returns `ok' when the app is registered.
+-spec wait_for_application(App :: atom()) -> ok.
+
wait_for_application(App) ->
wait_for_application(App, 0).
+%% @private
+%% @doc Helper for {@link wait_for_application/1}.
+-spec wait_for_application(App :: atom(),
+ Elapsed :: integer()) -> ok.
+
wait_for_application(App, Elapsed) ->
case lists:keymember(App, 1,
application:which_applications())
@@ -383,9 +618,20 @@ wait_for_application(App, Elapsed) ->
Elapsed + (?WAIT_POLL_INTERVAL))
end.
+%% @doc Only returns when the given service is registered and periodically
+%% logs state.
+%% @param Service Name of the service to wait for.
+%% @returns `ok' when the service is registered.
+-spec wait_for_service(Service :: atom()) -> ok.
+
wait_for_service(Service) ->
wait_for_service(Service, 0).
+%% @private
+%% @doc Helper for {@link wait_for_service/1}.
+-spec wait_for_service(Service :: atom(),
+ Elapsed :: integer()) -> ok.
+
wait_for_service(Service, Elapsed) ->
case lists:member(Service,
riak_core_node_watcher:services(node()))
@@ -411,5 +657,9 @@ wait_for_service(Service, Elapsed) ->
Elapsed + (?WAIT_POLL_INTERVAL))
end.
+%% @doc Retrieve the stat prefix.
+-spec stat_prefix() -> term().
+
+%% TODO stats are not used anymore, remove?
stat_prefix() ->
application:get_env(riak_core, stat_prefix, riak).
diff --git a/src/riak_core_apl.erl b/src/riak_core_apl.erl
index df4778746..a352d54f0 100644
--- a/src/riak_core_apl.erl
+++ b/src/riak_core_apl.erl
@@ -24,6 +24,7 @@
%% -------------------------------------------------------------------
-module(riak_core_apl).
+
-export([active_owners/1, active_owners/2, get_apl/3,
get_apl/4, get_apl_ann/2, get_apl_ann/3, get_apl_ann/4,
get_apl_ann_with_pnum/1, get_primary_apl/3,
@@ -76,7 +77,11 @@ active_owners(Service) ->
active_owners(Ring,
riak_core_node_watcher:nodes(Service)).
--spec active_owners(ring(), [node()]) -> preflist_ann().
+%% @doc Like {@link active_owners/1} with a specified ring and list of up nodes.
+%% @param Ring Ring to determine the owners.
+%% @param UpNodes List of node that are considered up.
+-spec active_owners(Ring :: ring(),
+ UpNodes :: [node()]) -> preflist_ann().
active_owners(Ring, UpNodes) ->
UpNodes1 = UpNodes,
@@ -114,6 +119,9 @@ get_apl(DocIdx, N, Ring, UpNodes) ->
%% @doc Get the active preflist taking account of which nodes are up for a given
%% chash/upnodes list and annotate each node with type of primary/fallback.
+-spec get_apl_ann(DocIdx :: docidx(), N :: n_val(),
+ UpNodes :: [node()]) -> preflist_ann().
+
get_apl_ann(DocIdx, N, UpNodes) ->
{ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
get_apl_ann_chbin(DocIdx, N, CHBin, UpNodes).
@@ -200,6 +208,9 @@ get_primary_apl(DocIdx, N, Ring, UpNodes) ->
%% @doc Return the first entry that is up in the preflist for `DocIdx'. This
%% will crash if all owning nodes are offline.
+-spec first_up(DocIdx :: docidx(),
+ Service :: atom()) -> {index(), node()}.
+
first_up(DocIdx, Service) ->
{ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
Itr = chashbin:iterator(DocIdx, CHBin),
@@ -211,10 +222,20 @@ first_up(DocIdx, Service) ->
Itr),
chashbin:itr_value(Itr2).
+%% @doc Return a list of owners that are not up.
+%% @param Service Service the nodes are running on, or a list of up nodes.
+%% @return List of all indices with owners that are currently not up.
+-spec offline_owners(Service :: atom() |
+ [node()]) -> [{index(), node()}].
+
offline_owners(Service) ->
{ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
offline_owners(Service, CHBin).
+%% @doc Returns list of all owners that are currently not up.
+-spec offline_owners(atom() | [node()],
+ CHBin :: chashbin()) -> [{index(), node()}].
+
offline_owners(Service, CHBin) when is_atom(Service) ->
UpSet =
ordsets:from_list(riak_core_node_watcher:nodes(Service)),
@@ -285,6 +306,9 @@ find_fallbacks_chbin([{Partition, _Node} | Rest] =
end.
%% @doc Return true if a node is up.
+-spec is_up(Node :: node(),
+ UpNodes :: [node()]) -> boolean().
+
is_up(Node, UpNodes) -> lists:member(Node, UpNodes).
%% @doc Return annotated preflist with partition ids/nums instead of hashes.
@@ -297,6 +321,10 @@ apl_with_partition_nums(Apl, Size) ->
Ann}
|| {{Hash, Node}, Ann} <- Apl].
+%% ===================================================================
+%% EUnit tests
+%% ===================================================================
+
-ifdef(TEST).
smallest_test() ->
diff --git a/src/riak_core_app.erl b/src/riak_core_app.erl
index 646887bbb..bec9e2726 100644
--- a/src/riak_core_app.erl
+++ b/src/riak_core_app.erl
@@ -22,6 +22,7 @@
-module(riak_core_app).
+
-behaviour(application).
%% Application callbacks
@@ -31,13 +32,32 @@
%% Application callbacks
%% ===================================================================
+%% @doc Callback to start the riak_core_lite application. This starts all
+%% necessary processes and needs to be executed before operating the
+%% system.
+%% @param StartType ignored.
+%% @param StartArgs ignored.
+%% @returns `{ok, Pid}' if the start was successful, otherwise `{error, Reason}'.
+-spec start(StartType :: application:start_type(),
+ StartArgs :: term()) -> {ok, pid()} | {error, term()}.
+
start(_StartType, _StartArgs) ->
ok = validate_ring_state_directory_exists(),
start_riak_core_sup().
+%% @doc Callback to stop the riak_core_lite application.
+%% @param State ignored.
+%% @returns `ok'.
+-spec stop(State :: term()) -> ok.
+
stop(_State) ->
logger:info("Stopped application riak_core", []), ok.
+%% @doc Start all application dependencies and try to read the ring directory.
+%% @returns `ok' if the directory exists and can be written to.
+%% @throws {error, invalid_ring_state_dir}
+-spec validate_ring_state_directory_exists() -> ok.
+
validate_ring_state_directory_exists() ->
riak_core_util:start_app_deps(riak_core),
{ok, RingStateDir} = application:get_env(riak_core,
@@ -54,6 +74,13 @@ validate_ring_state_directory_exists() ->
throw({error, invalid_ring_state_dir})
end.
+%% @doc Start the riak_core supervisor and register the ring event handler.
+%% @returns `{ok, Pid}' when the start was successful, `{error, Reason}'
+%% otherwise.
+%% @see riak_core_sup:init/1.
+-spec start_riak_core_sup() -> {ok, pid()} |
+ {error, term()}.
+
start_riak_core_sup() ->
%% Spin up the supervisor; prune ring files as necessary
case riak_core_sup:start_link() of
@@ -64,8 +91,14 @@ start_riak_core_sup() ->
{error, Reason} -> {error, Reason}
end.
+%% @doc Currently NoOp.
+-spec register_applications() -> ok.
+
register_applications() -> ok.
+%% @doc Add a standard handler to ring events.
+-spec add_ring_event_handler() -> ok.
+
add_ring_event_handler() ->
ok =
riak_core_ring_events:add_guarded_handler(riak_core_ring_handler,
diff --git a/src/riak_core_claim.erl b/src/riak_core_claim.erl
index 5edbaa1cc..46d57b16d 100644
--- a/src/riak_core_claim.erl
+++ b/src/riak_core_claim.erl
@@ -49,6 +49,8 @@
-module(riak_core_claim).
+-type ring() :: riak_core_ring:riak_core_ring().
+
-export([claim/1, claim/3, claim_until_balanced/2,
claim_until_balanced/4]).
@@ -66,8 +68,21 @@
-define(DEF_TARGET_N, 4).
+%% @doc Run the claim algorithm for the complete ring.
+%% @param Ring Ring the algorithm is run on.
+%% @returns The ring after the claim algorithm has been applied.
+-spec claim(Ring :: ring()) -> ring().
+
claim(Ring) -> claim(Ring, want, choose).
+%% @doc Run the claim algorithm for the complete ring.
+%% @param Ring Ring the algorithm is run on.
+%% @param Mode1 ignored.
+%% @param Mode2 ignored.
+%% @returns The ring after the claim algorithm has been applied.
+-spec claim(Ring :: ring(), Mode1 :: any(),
+ Mode2 :: any()) -> ring().
+
claim(Ring, _, _) ->
Members = riak_core_ring:claiming_members(Ring),
lists:foldl(fun (Node, Ring0) ->
@@ -75,9 +90,25 @@ claim(Ring, _, _) ->
end,
Ring, Members).
+%% @doc Apply the claim algorithm until a given node owns enough partitions.
+%% @param Ring Ring the algorithm is applied to.
+%% @param Node Node name of the node to be balanced.
+%% @returns The balanced ring.
+-spec claim_until_balanced(Ring :: ring(),
+ Node :: term()) -> ring().
+
claim_until_balanced(Ring, Node) ->
claim_until_balanced(Ring, Node, want, choose).
+%% @doc Apply the claim algorithm until a given node owns enough partitions.
+%% @param Ring Ring the algorithm is applied to.
+%% @param Node Node name of the node to be balanced.
+%% @param want Fixed guard.
+%% @param choose Fixed guard.
+%% @returns The balanced ring.
+-spec claim_until_balanced(Ring :: ring(),
+ Node :: term(), want, choose) -> ring().
+
claim_until_balanced(Ring, Node, want, choose) ->
NeedsIndexes = wants_claim_v2(Ring, Node),
case NeedsIndexes of
@@ -91,27 +122,67 @@ claim_until_balanced(Ring, Node, want, choose) ->
%% Claim Function Implementations
%% ===================================================================
-%% @spec default_choose_claim(riak_core_ring()) -> riak_core_ring()
-%% @doc Choose a partition at random.
+%% @doc Choose a partition at random for the local node.
+%% @param Ring Ring to claim on.
+%% @returns Updated ring.
+-spec default_choose_claim(Ring :: ring()) -> ring().
+
default_choose_claim(Ring) ->
default_choose_claim(Ring, node()).
+%% @doc Choose a partition for a given node at random.
+%% @param Ring Ring to claim on.
+%% @param Node Node to claim for.
+%% @returns Updated ring.
+-spec default_choose_claim(Ring :: ring(),
+ Node :: term()) -> ring().
+
default_choose_claim(Ring, Node) ->
choose_claim_v2(Ring, Node).
+%% @doc Choose a partition for a given node according to the given parameters.
+%% @param Ring Ring to claim on.
+%% @param Node Node to claim for.
+%% @param Params Parameters to consider.
+%% @returns Updated ring.
+-spec default_choose_claim(Ring :: ring(),
+ Node :: term(), Params :: [term()]) -> ring().
+
default_choose_claim(Ring, Node, Params) ->
choose_claim_v2(Ring, Node, Params).
-%% @spec default_wants_claim(riak_core_ring()) -> {yes, integer()} | no
%% @doc Want a partition if we currently have less than floor(ringsize/nodes).
+%% @param Ring Ring to claim on.
+%% @returns `{yes, Difference}' or `no'.
+-spec default_wants_claim(Ring :: ring()) -> {yes,
+ integer()} |
+ no.
+
default_wants_claim(Ring) ->
default_wants_claim(Ring, node()).
+%% @doc Like {@link default_wants_claim/1} with a given node.
+%% @param Node Node to decide balance for.
+-spec default_wants_claim(Ring :: ring(),
+ Node :: term()) -> {yes, integer()} | no.
+
default_wants_claim(Ring, Node) ->
wants_claim_v2(Ring, Node).
+%% @doc Decide if the local node needs more partitions.
+%% @param Ring Ring to claim on.
+%% @returns `{yes, Difference}' or `no'.
+-spec wants_claim_v2(Ring :: ring()) -> {yes,
+ integer()} |
+ no.
+
wants_claim_v2(Ring) -> wants_claim_v2(Ring, node()).
+%% @doc Like {@link wants_claim_v2/1} for another node.
+%% @param Node Node to decide balance for.
+-spec wants_claim_v2(Ring :: ring(),
+ Node :: term()) -> {yes, integer()} | no.
+
wants_claim_v2(Ring, Node) ->
Active = riak_core_ring:claiming_members(Ring),
Owners = riak_core_ring:all_owners(Ring),
@@ -125,9 +196,17 @@ wants_claim_v2(Ring, Node) ->
true -> {yes, Avg - Count}
end.
-%% Provide default choose parameters if none given
+%% @doc Provide default choose parameters if none given
+-spec default_choose_params() -> [term()].
+
default_choose_params() -> default_choose_params([]).
+%% @doc Provide default NVal if it is not contained in the given parameters.
+%% @param Params List of claim parameters.
+%% @returns List of claim parameters containing target NVal.
+-spec default_choose_params(Params ::
+ term()) -> [term()].
+
default_choose_params(Params) ->
case proplists:get_value(target_n_val, Params) of
undefined ->
@@ -137,12 +216,27 @@ default_choose_params(Params) ->
_ -> Params
end.
+%% @doc Choose a partition the local node should claim.
+%% @param Ring Ring to claim on.
+%% @returns Updated ring.
+-spec choose_claim_v2(Ring :: ring()) -> ring().
+
choose_claim_v2(Ring) -> choose_claim_v2(Ring, node()).
+%% @doc Like {@link choose_claim_v2/1} with a specified node.
+%% @param Node Specified node that claims a partition.
+-spec choose_claim_v2(Ring :: ring(),
+ Node :: term()) -> ring().
+
choose_claim_v2(Ring, Node) ->
Params = default_choose_params(),
choose_claim_v2(Ring, Node, Params).
+%% @doc Like {@link choose_claim_v2/2} with specified parameters.
+%% @param Params0 Claim parameter list.
+-spec choose_claim_v2(Ring :: ring(), Node :: term(),
+ Params0 :: [term()]) -> ring().
+
choose_claim_v2(Ring, Node, Params0) ->
Params = default_choose_params(Params0),
%% Active::[node()]
@@ -322,11 +416,26 @@ increase_takes([{Node, Own, Delta} | Rest], N, Max, Acc)
increase_takes([NodeDelta | Rest], N, Max, Acc) ->
increase_takes(Rest, N, Max, [NodeDelta | Acc]).
+%% @doc Check if the given ring can provide enough owners for each node to meet
+%% the target NVal.
+%% @param Ring Ring to check.
+%% @param TargetN NVal to check.
+%% @returns Boolean indicating if the ring meets the requirement.
+-spec meets_target_n(Ring :: ring(),
+ TargetN :: pos_integer()) -> boolean().
+
meets_target_n(Ring, TargetN) ->
Owners = lists:keysort(1,
riak_core_ring:all_owners(Ring)),
meets_target_n(Owners, TargetN, 0, [], []).
+%% @private
+%% @doc Helper function for {@link meets_target_n/2}.
+-spec meets_target_n(Owners :: [{integer(), term()}],
+ TargetN :: pos_integer(), Index :: non_neg_integer(),
+ First :: [{integer(), term()}],
+ Last :: [{integer(), term()}]) -> boolean().
+
meets_target_n([{Part, Node} | Rest], TargetN, Index,
First, Last) ->
case lists:keytake(Node, 1, Last) of
@@ -355,11 +464,22 @@ meets_target_n([], TargetN, Index, First, Last) ->
Last),
{true, [Part || {_, _, Part} <- Violations]}.
-%% Claim diversify tries to build a perfectly diverse ownership list that meets
-%% target N. It uses wants to work out which nodes want partitions, but does
-%% not honor the counts currently. The algorithm incrementally builds the ownership
-%% list, updating the adjacency matrix needed to compute the diversity score as each
-%% node is added and uses it to drive the selection of the next nodes.
+%% @doc Claim diversify tries to build a perfectly diverse ownership list that
+%% meets target N. It uses wants to work out which nodes want partitions,
+%% but does not honor the counts currently. The algorithm incrementally
+%% builds the ownership list, updating the adjacency matrix needed to
+%% compute the diversity score as each node is added and uses it to drive
+%% the selection of the next nodes.
+%% @param Wants List of Node names and the respective number of partitions they
+%% want to claim.
+%% @param Owners List of indices and the name of their owning node.
+%% @param Params Parameters.
+%% @returns New owner list and a list of attributes, in this case `diversified'.
+-spec claim_diversify(Wants :: [{term(), integer()}],
+ Owners :: [{integer(), term()}],
+ Params :: [term()]) -> {[{integer(), term()}],
+ [atom()]}.
+
claim_diversify(Wants, Owners, Params) ->
TN = proplists:get_value(target_n_val, Params,
?DEF_TARGET_N),
@@ -370,8 +490,18 @@ claim_diversify(Wants, Owners, Params) ->
Claiming, TN),
{NewOwners, [diversified]}.
-%% Claim nodes in seq a,b,c,a,b,c trying to handle the wraparound
-%% case to meet target N
+%% @doc Claim nodes in seq a,b,c,a,b,c trying to handle the wraparound case to
+%% meet target N
+%% @param Wants List of Node names and the respective number of partitions they
+%% want to claim.
+%% @param Owners List of indices and the name of their owning node.
+%% @param Params Parameters.
+%% @returns Diagonalized list of owners and a list of attributes, in this case
+%% `diagonalized'.
+-spec claim_diagonal(Wants :: [{term(), integer()}],
+ Owners :: [{integer(), term()}],
+ Params :: [term()]) -> {[term()], [atom()]}.
+
claim_diagonal(Wants, Owners, Params) ->
TN = proplists:get_value(target_n_val, Params,
?DEF_TARGET_N),
@@ -498,6 +628,14 @@ backfill_ring(RingSize, Nodes, Remaining, Acc) ->
backfill_ring(RingSize, Nodes, Remaining - 1,
[Nodes | Acc]).
+%% @doc Rebalance the expected load on nodes using a diagonal stripe.
+%% @param Ring Ring to rebalance.
+%% @param Node Node to rebalance from.
+%% @returns Rebalanced ring.
+%% @see diagonal_stripe/2.
+-spec claim_rebalance_n(Ring :: ring(),
+ Node :: term()) -> ring().
+
claim_rebalance_n(Ring, Node) ->
Nodes = lists:usort([Node
| riak_core_ring:claiming_members(Ring)]),
@@ -507,6 +645,14 @@ claim_rebalance_n(Ring, Node) ->
end,
Ring, Zipped).
+%% @doc Creates a diagonal stripe of the given nodes over the partitions of the
+%% ring.
+%% @param Ring Ring on which the stripes are built.
+%% @param Nodes Nodes that are to be distributed.
+%% @returns List of indices and assigned nodes.
+-spec diagonal_stripe(Ring :: ring(),
+ Nodes :: [term()]) -> [{integer(), term()}].
+
diagonal_stripe(Ring, Nodes) ->
%% diagonal stripes guarantee most disperse data
Partitions = lists:sort([I
@@ -520,20 +666,39 @@ diagonal_stripe(Ring, Nodes) ->
1, length(Partitions))),
Zipped.
+%% @doc Choose a random partition for the local node.
+%% @param Ring Ring to claim on.
+%% @returns Updated ring.
+-spec random_choose_claim(Ring :: ring()) -> ring().
+
random_choose_claim(Ring) ->
random_choose_claim(Ring, node()).
+%% @doc Like {@link random_choose_claim/1} with a specified node.
+%% @param Node Node to choose a partition for.
+-spec random_choose_claim(Ring :: ring(),
+ Node :: term()) -> ring().
+
random_choose_claim(Ring, Node) ->
random_choose_claim(Ring, Node, []).
+%% @doc Like {@link random_choose_claim/2} with specified parameters.
+%% @param Params List of parameters, currently ignored.
+-spec random_choose_claim(Ring :: ring(),
+ Node :: term(), Params :: [term()]) -> ring().
+
random_choose_claim(Ring, Node, _Params) ->
riak_core_ring:transfer_node(riak_core_ring:random_other_index(Ring),
Node, Ring).
-%% @spec never_wants_claim(riak_core_ring()) -> no
%% @doc For use by nodes that should not claim any partitions.
+-spec never_wants_claim(ring()) -> no.
+
never_wants_claim(_) -> no.
+%% @doc For use by nodes that should not claim any partitions.
+-spec never_wants_claim(ring(), term()) -> no.
+
never_wants_claim(_, _) -> no.
%% ===================================================================
@@ -563,7 +728,6 @@ find_violations(Ring, TargetN) ->
lists:reverse(Bad).
%% @private
-%%
%% @doc Counts up the number of partitions owned by each node.
-spec get_counts([node()],
[{integer(), _}]) -> [{node(), non_neg_integer()}].
@@ -580,6 +744,12 @@ get_counts(Nodes, Ring) ->
dict:to_list(Counts).
%% @private
+%% @doc Add default delta values for all owners to the delta list.
+-spec add_default_deltas(IdxOwners :: [{integer(),
+ term()}],
+ Deltas :: [{term(), integer()}],
+ Default :: integer()) -> [{term(), integer()}].
+
add_default_deltas(IdxOwners, Deltas, Default) ->
{_, Owners} = lists:unzip(IdxOwners),
Owners2 = lists:usort(Owners),
@@ -587,9 +757,15 @@ add_default_deltas(IdxOwners, Deltas, Default) ->
lists:ukeysort(1, Deltas ++ Defaults).
%% @private
-%%
-%% @doc Filter out candidate indices that would violate target_n given
-%% a node's current partition ownership.
+%% @doc Filter out candidate indices that would violate target_n given a node's
+%% current partition ownership.
+-spec prefilter_violations(Ring :: ring(),
+ Node :: term(), AllIndices :: [{term(), integer()}],
+ Indices :: [{term(), integer()}],
+ TargetN :: pos_integer(),
+ RingSize :: non_neg_integer()) -> [{term(),
+ integer()}].
+
prefilter_violations(Ring, Node, AllIndices, Indices,
TargetN, RingSize) ->
CurrentIndices = riak_core_ring:indices(Ring, Node),
@@ -603,7 +779,6 @@ prefilter_violations(Ring, Node, AllIndices, Indices,
CurrentNth)].
%% @private
-%%
%% @doc Select indices from a given candidate set, according to two
%% goals.
%%
@@ -614,6 +789,12 @@ prefilter_violations(Ring, Node, AllIndices, Indices,
%% expected ownership. In other words, if A owns 5 partitions and
%% the desired ownership is 3, then we try to claim at most 2 partitions
%% from A.
+-spec select_indices(Owners :: [],
+ Deltas :: [{term(), integer()}],
+ Indices :: [{term(), integer()}],
+ TargetN :: pos_integer(),
+ RingSize :: pos_integer()) -> [integer()].
+
select_indices(_Owners, _Deltas, [], _TargetN,
_RingSize) ->
[];
@@ -653,8 +834,11 @@ select_indices(Owners, Deltas, Indices, TargetN,
lists:reverse(Claim).
%% @private
-%%
%% @doc Determine if two positions in the ring meet target_n spacing.
+-spec spaced_by_n(Ntha :: integer(), NthB :: integer(),
+ TargetN :: pos_integer(),
+ RingSize :: pos_integer()) -> boolean().
+
spaced_by_n(NthA, NthB, TargetN, RingSize) ->
case NthA > NthB of
true ->
@@ -664,8 +848,13 @@ spaced_by_n(NthA, NthB, TargetN, RingSize) ->
end,
(NFwd >= TargetN) and (NBack >= TargetN).
-%% For each node in wants, work out how many more partition each node wants (positive) or is
-%% overloaded by (negative) compared to what it owns.
+%% @doc For each node in wants, work out how many more partitions each node wants
+%% (positive) or is overloaded by (negative) compared to what it owns.
+%% @param Wants List of node names and their target number of partitions.
+%% @param Owns List of node names and their actual number of partitions.
+-spec wants_owns_diff(Wants :: [{term(), integer()}],
+ Owns :: [{term(), integer()}]) -> [{term(), integer()}].
+
wants_owns_diff(Wants, Owns) ->
[case lists:keyfind(N, 1, Owns) of
{N, O} -> {N, W - O};
@@ -673,8 +862,12 @@ wants_owns_diff(Wants, Owns) ->
end
|| {N, W} <- Wants].
-%% Given a ring, work out how many partition each wants to be
-%% considered balanced
+%% @doc Given a ring, work out how many partitions each wants to be
+%% considered balanced.
+%% @param Ring Ring to figure out wants for.
+%% @returns List of node names and the number of wanted partitions.
+-spec wants(Ring :: ring()) -> [{term(), integer()}].
+
wants(Ring) ->
Active =
lists:sort(riak_core_ring:claiming_members(Ring)),
@@ -686,8 +879,11 @@ wants(Ring) ->
lists:sort(ActiveWants ++ InactiveWants).
%% @private
-%% Given a number of nodes and ring size, return a list of
+%% @doc Given a number of nodes and ring size, return a list of
%% desired ownership, S long that add up to Q
+-spec wants_counts(S :: non_neg_integer(),
+ Q :: non_neg_integer()) -> [integer()].
+
wants_counts(S, Q) ->
Max = roundup(Q / S),
case S * Max - Q of
@@ -697,7 +893,10 @@ wants_counts(S, Q) ->
lists:duplicate(S - X, Max)
end.
-%% Round up to next whole integer - ceil
+%% @private
+%% @doc Round up to next whole integer - ceil
+-spec roundup(float()) -> integer().
+
roundup(I) when I >= 0 ->
T = erlang:trunc(I),
case I - T of
diff --git a/src/riak_core_claimant.erl b/src/riak_core_claimant.erl
index 90a49bbda..e4409e7d9 100644
--- a/src/riak_core_claimant.erl
+++ b/src/riak_core_claimant.erl
@@ -37,7 +37,8 @@
handle_info/2, terminate/2, code_change/3]).
-type action() :: leave | remove | {replace, node()} |
- {force_replace, node()}.
+ {force_replace, node()} | {resize, integer()} |
+ abort_resize.
-type
riak_core_ring() :: riak_core_ring:riak_core_ring().
@@ -47,10 +48,44 @@
-type ring_transition() :: {riak_core_ring(),
riak_core_ring()}.
+-type change() :: {node(), action()}.
+
+-type leave_request_error() :: not_member |
+ only_member | already_leaving.
+
+-type remove_request_error() :: is_claimant |
+ not_member | only_member.
+
+-type replace_request_error() :: not_member |
+ already_leaving | already_replacement |
+ invalid_replacement.
+
+-type force_replace_request_error() :: not_member |
+ is_claimant | already_replacement |
+ invalid_replacement.
+
+-type resize_request_error() :: same_size |
+ single_node | pending_changes.
+
+-type resize_abort_request_error() :: not_resizing.
+
+-type request_error() :: leave_request_error() |
+ remove_request_error() | replace_request_error() |
+ force_replace_request_error() |
+ resize_request_error() | resize_abort_request_error().
+
+-type commit_error() :: nothing_planned |
+ invalid_resize_claim | ring_not_ready | plan_changed.
+
+-type log() :: fun((atom(), term()) -> ok).
+
+-type next() :: [{integer(), term(), term(), [module()],
+ awaiting | complete}].
+
-record(state,
{last_ring_id,
%% The set of staged cluster changes
- changes :: [{node(), action()}],
+ changes :: [change()],
%% Ring computed during the last planning stage based on
%% applying a set of staged cluster changes. When commiting
%% changes, the computed ring must match the previous planned
@@ -59,7 +94,9 @@
%% Random number seed passed to remove_node to ensure the
%% current randomized remove algorithm is deterministic
%% between plan and commit phases
- seed}).
+ seed :: erlang:timestamp()}).
+
+-type state() :: #state{}.
-define(ROUT(S, A),
ok).%%-define(ROUT(S,A),?debugFmt(S,A)).
@@ -70,6 +107,9 @@
%%%===================================================================
%% @doc Spawn and register the riak_core_claimant server
+-spec start_link() -> {ok, pid()} | ignore |
+ {error, {already_started, pid()} | term()}.
+
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [],
[]).
@@ -79,8 +119,11 @@ start_link() ->
%% modifications that correspond to each resulting cluster transition
%% (eg. the initial transition that applies the staged changes, and
%% any additional transitions triggered by later rebalancing).
--spec plan() -> {error, term()} |
- {ok, [action()], [ring_transition()]}.
+%% @returns `{ok, Changes, NextRings}' if the plan can be generated,
+%% `{error, Reason}' otherwise.
+-spec plan() -> {error,
+ ring_not_ready | invalid_resize_claim} |
+ {ok, [change()], [riak_core_ring()]}.
plan() -> gen_server:call(claimant(), plan, infinity).
@@ -88,7 +131,9 @@ plan() -> gen_server:call(claimant(), plan, infinity).
%% A commit is only allowed to succeed if the ring is ready and if the
%% current set of changes matches those computed by the most recent
%% call to plan/0.
--spec commit() -> ok | {error, term()}.
+%% @returns `ok' if the plan is committed successfully, `{error, Reason}' or
+%% just `error' otherwise.
+-spec commit() -> ok | error | {error, commit_error()}.
commit() ->
gen_server:call(claimant(), commit, infinity).
@@ -96,18 +141,35 @@ commit() ->
%% @doc Stage a request for `Node' to leave the cluster. If committed, `Node'
%% will handoff all of its data to other nodes in the cluster and then
%% shutdown.
+%% @param Node Node to leave the cluster.
+%% @returns `ok' If the staging was successful, `{error, Reason}' otherwise.
+-spec leave_member(Node :: node()) -> ok |
+ {error, leave_request_error()}.
+
leave_member(Node) -> stage(Node, leave).
%% @doc Stage a request for `Node' to be forcefully removed from the cluster.
%% If committed, all partitions owned by `Node' will immediately be
%% re-assigned to other nodes. No data on `Node' will be transfered to
%% other nodes, and all replicas on `Node' will be lost.
+%% @param Node Node to be removed from the cluster.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec remove_member(Node :: node()) -> ok |
+ {error, remove_request_error()}.
+
remove_member(Node) -> stage(Node, remove).
%% @doc Stage a request for `Node' to be replaced by `NewNode'. If committed,
%% `Node' will handoff all of its data to `NewNode' and then shutdown.
%% The current implementation requires `NewNode' to be a fresh node that
%% is joining the cluster and does not yet own any partitions of its own.
+%% @param Node Node to be replaced.
+%% @param NewNode Node to replace the old node.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec replace(Node :: node(), NewNode :: node()) -> ok |
+ {error,
+ replace_request_error()}.
+
replace(Node, NewNode) ->
stage(Node, {replace, NewNode}).
@@ -117,6 +179,13 @@ replace(Node, NewNode) ->
%% and all replicas on `Node' will be lost. The current implementation
%% requires `NewNode' to be a fresh node that is joining the cluster
%% and does not yet own any partitions of its own.
+%% @param Node Node to be replaced.
+%% @param NewNode Node to replace the old node.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec force_replace(Node :: node(),
+ NewNode :: node()) -> ok |
+ {error, replace_request_error()}.
+
force_replace(Node, NewNode) ->
stage(Node, {force_replace, NewNode}).
@@ -127,17 +196,27 @@ force_replace(Node, NewNode) ->
%% After completion, the new ring is installed and data is safely
%% removed from partitons no longer owner by a node or present
%% in the ring.
--spec resize_ring(integer()) -> ok | {error, atom()}.
+%% @param NewRingSize Number of partitions the ring should be resized to.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec resize_ring(integer()) -> ok |
+ {error, resize_request_error()}.
resize_ring(NewRingSize) ->
%% use the node making the request. it will be ignored
stage(node(), {resize, NewRingSize}).
--spec abort_resize() -> ok | {error, atom()}.
+%% @doc Stage a request to abort a resize operation. If committed, the installed
+%% ring will stay the same.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec abort_resize() -> ok |
+ {error, resize_abort_request_error()}.
abort_resize() -> stage(node(), abort_resize).
%% @doc Clear the current set of staged transfers
+%% @returns `ok'.
+-spec clear() -> ok.
+
clear() -> gen_server:call(claimant(), clear, infinity).
%% @doc This function is called as part of the ring reconciliation logic
@@ -149,6 +228,10 @@ clear() -> gen_server:call(claimant(), clear, infinity).
%% --> riak_core_ring:ring_changed/2
%% -----> riak_core_ring:internal_ring_changed/2
%% --------> riak_core_claimant:ring_changed/2
+%% @returns The ring with the changes applied to.
+-spec ring_changed(Node :: node(),
+ Ring :: riak_core_ring()) -> riak_core_ring().
+
ring_changed(Node, Ring) ->
internal_ring_changed(Node, Ring).
@@ -156,6 +239,14 @@ ring_changed(Node, Ring) ->
%%% Claim sim helpers until refactor
%%%===================================================================
+%% @doc Assign indices owned by replaced nodes to the nodes replacing them.
+%% @param CState Ring on which the indices are to be reassigned.
+%% @returns `{Changed, NewRing}', indicating whether changes have been made and the
+%% resulting ring.
+-spec reassign_indices(CState ::
+ riak_core_ring:riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
reassign_indices(CState) ->
reassign_indices(CState, [], erlang:timestamp(),
fun no_log/2).
@@ -164,10 +255,23 @@ reassign_indices(CState) ->
%%% Internal API helpers
%%%===================================================================
+%% @private
+%% @doc Stage the given action to be executed with the next commit.
+%% @param Node Node requesting the stage.
+%% @param Action Action to be staged.
+%% @returns `ok' if the staging was successful, `{error, Reason}' otherwise.
+-spec stage(Node :: node(), Action :: action()) -> ok |
+ {error, request_error()}.
+
stage(Node, Action) ->
gen_server:call(claimant(), {stage, Node, Action},
infinity).
+%% @private
+%% @doc Retrieve a reference to the current claimant.
+%% @returns Current claimant.
+-spec claimant() -> {module(), term()}.
+
claimant() ->
{ok, Ring} = riak_core_ring_manager:get_my_ring(),
{?MODULE, riak_core_ring:claimant(Ring)}.
@@ -176,10 +280,23 @@ claimant() ->
%%% gen_server callbacks
%%%===================================================================
+%% @doc Callback for gen_server.
+%% @see gen_server:start_link/3
+%% @see gen_server:start_link/4.
+-spec init(Args :: []) -> {ok, state()}.
+
init([]) ->
schedule_tick(),
{ok, #state{changes = [], seed = erlang:timestamp()}}.
+%% @doc Callback for gen_server.
+%% @see gen_server:call/2.
+%% @see gen_server:call/3.
+-spec handle_call(Call :: term(),
+ From :: {pid(), term()}, State :: state()) -> {reply,
+ term(),
+ state()}.
+
handle_call(clear, _From, State) ->
State2 = clear_staged(State), {reply, ok, State2};
handle_call({stage, Node, Action}, _From, State) ->
@@ -202,8 +319,18 @@ handle_call(commit, _From, State) ->
handle_call(_Request, _From, State) ->
Reply = ok, {reply, Reply, State}.
+%% @doc Callback for gen_server. Not implemented.
+%% @see gen_server:cast/2.
+-spec handle_cast(Msg :: term(),
+ State :: state()) -> {noreply, state()}.
+
handle_cast(_Msg, State) -> {noreply, State}.
+%% @doc Callback for gen_server.
+%% @see gen_server.
+-spec handle_info(Info :: term(),
+ State :: state()) -> {noreply, state()}.
+
handle_info(tick, State) ->
State2 = tick(State), {noreply, State2};
handle_info(reset_ring_id, State) ->
@@ -211,8 +338,19 @@ handle_info(reset_ring_id, State) ->
{noreply, State2};
handle_info(_Info, State) -> {noreply, State}.
+%% @doc Callback for gen_server. Not implemented.
+%% @see gen_server:stop/1.
+%% @see gen_server:stop/2.
+-spec terminate(Reason :: term(),
+ State :: state()) -> ok.
+
terminate(_Reason, _State) -> ok.
+%% @doc Callback for gen_server. Not implemented.
+%% @see gen_server.
+-spec code_change(OldVsn :: term() | {down, term()},
+ State :: state(), Extra :: term()) -> {ok, state()}.
+
code_change(_OldVsn, State, _Extra) -> {ok, State}.
%%%===================================================================
@@ -222,6 +360,12 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}.
%% @private
%% @doc Verify that a cluster change request is valid and add it to
%% the list of staged changes.
+-spec maybe_stage(Node :: node(), Action :: action(),
+ Ring :: riak_core_ring(), State :: state()) -> {ok |
+ {error,
+ request_error()},
+ state()}.
+
maybe_stage(Node, Action, Ring,
State = #state{changes = Changes}) ->
case valid_request(Node, Action, Changes, Ring) of
@@ -236,6 +380,12 @@ maybe_stage(Node, Action, Ring,
%% @private
%% @doc Determine how the staged set of cluster changes will affect
%% the cluster. See {@link plan/0} for additional details.
+-spec generate_plan(Ring :: riak_core_ring(),
+ State :: state()) -> {{ok, [change()],
+ [riak_core_ring()]} |
+ {error, invalid_resize_claim},
+ state()}.
+
generate_plan(Ring,
State = #state{changes = Changes}) ->
Changes2 = filter_changes(Changes, Ring),
@@ -245,6 +395,16 @@ generate_plan(Ring,
State2 = State#state{changes = Changes2},
generate_plan(AllChanges, Ring, State2).
+%% @private
+%% @see generate_plan/2.
+-spec generate_plan(Changes :: [change()],
+ Ring :: riak_core_ring(), State :: state()) -> {{ok,
+ [change()],
+ [riak_core_ring()]} |
+ {error,
+ invalid_resize_claim},
+ state()}.
+
generate_plan([], _, State) ->
%% There are no changes to apply
{{ok, [], []}, State};
@@ -263,6 +423,10 @@ generate_plan(Changes, Ring,
%% @private
%% @doc Commit the set of staged cluster changes. See {@link commit/0}
%% for additional details.
+-spec commit_staged(State :: state()) -> {ok | error,
+ state()} |
+ {{error, commit_error()}, state()}.
+
commit_staged(State = #state{next_ring = undefined}) ->
{{error, nothing_planned}, State};
commit_staged(State) ->
@@ -276,11 +440,27 @@ commit_staged(State) ->
end.
%% @private
+%% @see commit_staged/1.
+-spec maybe_commit_staged(State :: state()) -> {ok,
+ riak_core_ring()} |
+ not_changed |
+ {not_changed,
+ invalid_resize_claim |
+ ring_not_ready | plan_changed}.
+
maybe_commit_staged(State) ->
riak_core_ring_manager:ring_trans(fun maybe_commit_staged/2,
State).
%% @private
+%% @see maybe_commit_staged/1.
+-spec maybe_commit_staged(Ring :: riak_core_ring(),
+ State :: state()) -> {new_ring, riak_core_ring()} |
+ ignore |
+ {ignore,
+ invalid_resize_claim |
+ ring_not_ready | plan_changed}.
+
maybe_commit_staged(Ring,
State = #state{changes = Changes, seed = Seed}) ->
Changes2 = filter_changes(Changes, Ring),
@@ -292,6 +472,14 @@ maybe_commit_staged(Ring,
end.
%% @private
+%% @see maybe_commit_staged/2.
+-spec maybe_commit_staged(Ring :: riak_core_ring(),
+ NextRing :: riak_core_ring(),
+ State :: state()) -> {new_ring, riak_core_ring()} |
+ ignore |
+ {ignore,
+ ring_not_ready | plan_changed}.
+
maybe_commit_staged(Ring, NextRing,
#state{next_ring = PlannedRing}) ->
Claimant = riak_core_ring:claimant(Ring),
@@ -314,16 +502,31 @@ maybe_commit_staged(Ring, NextRing,
%% staged action, the only way to clear pending joins is to remove
%% the `joining' nodes from the cluster. Used by the public API
%% call {@link clear/0}.
+-spec clear_staged(State :: state()) -> state().
+
clear_staged(State) ->
remove_joining_nodes(),
State#state{changes = [], seed = erlang:timestamp()}.
%% @private
+-spec remove_joining_nodes() -> {ok, riak_core_ring()} |
+ not_changed.
+
remove_joining_nodes() ->
riak_core_ring_manager:ring_trans(fun remove_joining_nodes/2,
ok).
%% @private
+%% @doc Removes nodes that are currently joining from the ring. Used as a
+%% callback for {@link riak_core_ring_manager:ring_trans()}.
+%% @param Ring Ring to remove nodes from.
+%% @param Args ignored, exists to conform to the callback function form.
+%% @returns `{new_ring, Ring}' if the removal can be done, `ignore' if this is not
+%% the claimant or there are no joining nodes.
+-spec remove_joining_nodes(Ring :: riak_core_ring(),
+ Args :: any()) -> {new_ring, riak_core_ring()} |
+ ignore.
+
remove_joining_nodes(Ring, _) ->
Claimant = riak_core_ring:claimant(Ring),
IsClaimant = Claimant == node(),
@@ -338,6 +541,13 @@ remove_joining_nodes(Ring, _) ->
end.
%% @private
+%% @doc Helper for remove_joining_nodes/2.
+%% @see remove_joining_nodes/2.
+-spec remove_joining_nodes_from_ring(Claimant :: term(),
+ Joining :: [node()],
+ Ring ::
+ riak_core_ring()) -> riak_core_ring().
+
remove_joining_nodes_from_ring(Claimant, Joining,
Ring) ->
NewRing = lists:foldl(fun (Node, RingAcc) ->
@@ -351,6 +561,23 @@ remove_joining_nodes_from_ring(Claimant, Joining,
NewRing2.
%% @private
+%% @doc Check if the given request is valid for the current state.
+%% @param Node Node involved in the action.
+%% @param Action Requested action.
+%% @param Changes List of changes staged.
+%% @param Ring Ring action should be taken on.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see valid_leave_request/2.
+%% @see valid_remove_request/2.
+%% @see valid_replace_request/4.
+%% @see valid_force_replace_request/4.
+%% @see valid_resize_request/3.
+%% @see valid_resize_abort_request/1.
+-spec valid_request(Node :: node(), Action :: action(),
+ Changes :: [change()],
+ Ring :: riak_core_ring()) -> true |
+ {error, request_error()}.
+
valid_request(Node, Action, Changes, Ring) ->
case Action of
leave -> valid_leave_request(Node, Ring);
@@ -366,6 +593,18 @@ valid_request(Node, Action, Changes, Ring) ->
end.
%% @private
+%% @doc Check if a leave request is valid. A leave request is valid if the
+%% leaving node is valid, a member, not the only member, and not already
+%% leaving.
+%% @param Node Node to leave.
+%% @param Ring Ring the node should leave from.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see leave_member/1.
+-spec valid_leave_request(Node :: node(),
+ Ring :: riak_core_ring()) -> true |
+ {error,
+ leave_request_error()}.
+
valid_leave_request(Node, Ring) ->
case {riak_core_ring:all_members(Ring),
riak_core_ring:member_status(Ring, Node)}
@@ -378,6 +617,18 @@ valid_leave_request(Node, Ring) ->
end.
%% @private
+%% @doc Check if a remove request is valid. A remove request is valid if the
+%% removed node is not the claimant, is a member, and is not the only
+%% member.
+%% @param Node Node to be removed.
+%% @param Ring Ring to remove the node from.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see remove_member/1.
+-spec valid_remove_request(Node :: node(),
+ Ring :: riak_core_ring()) -> true |
+ {error,
+ remove_request_error()}.
+
valid_remove_request(Node, Ring) ->
IsClaimant = Node == riak_core_ring:claimant(Ring),
case {IsClaimant, riak_core_ring:all_members(Ring),
@@ -390,6 +641,21 @@ valid_remove_request(Node, Ring) ->
end.
%% @private
+%% @doc Check if a replace request is valid. A replace request is valid if the
+%% node to be replaced is a member, not already leaving or being replaced,
+%% and if the new node is freshly joining.
+%% @param Node Node to be replaced.
+%% @param NewNode Node to replace the old node.
+%% @param Changes Changes to determine currently staged replacements.
+%% @param Ring Ring to replace the node on.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see replace/2.
+-spec valid_replace_request(Node :: node(),
+ NewNode :: node(), Changes :: [change()],
+ Ring :: riak_core_ring()) -> true |
+ {error,
+ replace_request_error()}.
+
valid_replace_request(Node, NewNode, Changes, Ring) ->
AlreadyReplacement = lists:member(NewNode,
existing_replacements(Changes)),
@@ -408,6 +674,21 @@ valid_replace_request(Node, NewNode, Changes, Ring) ->
end.
%% @private
+%% @doc Check if a force replace request is valid. A force replace request is
+%% valid if the node to be replaced is a member, not the claimant, not
+%% already being replaced, and if the new node is freshly joining.
+%% @param Node Node to be replaced.
+%% @param NewNode Node to replace the old node.
+%% @param Changes Changes to determine currently staged replacements.
+%% @param Ring Ring to replace the node on.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see force_replace/2.
+-spec valid_force_replace_request(Node :: node(),
+ NewNode :: node(), Changes :: [change()],
+ Ring :: riak_core_ring()) -> true |
+ {error,
+ force_replace_request_error()}.
+
valid_force_replace_request(Node, NewNode, Changes,
Ring) ->
IsClaimant = Node == riak_core_ring:claimant(Ring),
@@ -429,7 +710,21 @@ valid_force_replace_request(Node, NewNode, Changes,
end.
%% @private
-%% restrictions preventing resize along with other operations are temporary
+%% @doc Check if a resize request is valid. A resize request is valid if the new
+%% size differs from the old one, there is more than one node on the ring,
+%% and there are no pending changes. Restrictions preventing resize along
+%% with other operations are temporary.
+%% @param NewRingSize Number of partitions after the resize operation.
+%% @param Changes List of changes to check for pending changes.
+%% @param Ring Ring to resize.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see resize_ring/1.
+-spec valid_resize_request(NewRingSize :: pos_integer(),
+ Changes :: [change()],
+ Ring :: riak_core_ring()) -> true |
+ {error,
+ resize_request_error()}.
+
valid_resize_request(NewRingSize, [], Ring) ->
IsResizing = riak_core_ring:num_partitions(Ring) =/=
NewRingSize,
@@ -443,6 +738,16 @@ valid_resize_request(NewRingSize, [], Ring) ->
{_, _, true} -> {error, pending_changes}
end.
+%% @doc Check if a resize abort request is valid. A resize abort request is
+%% valid if the ring is actually resizing and not in post resize state.
+%% @param Ring Ring to abort the resize on.
+%% @returns `true' if the request is valid, `{error, Reason}' otherwise.
+%% @see abort_resize/0.
+-spec valid_resize_abort_request(Ring ::
+ riak_core_ring()) -> true |
+ {error,
+ resize_abort_request_error()}.
+
valid_resize_abort_request(Ring) ->
IsResizing = riak_core_ring:is_resizing(Ring),
IsPostResize = riak_core_ring:is_post_resize(Ring),
@@ -455,6 +760,9 @@ valid_resize_abort_request(Ring) ->
%% @doc Filter out any staged changes that are no longer valid. Changes
%% can become invalid based on other staged changes, or by cluster
%% changes that bypass the staging system.
+-spec filter_changes(Changes :: [change()],
+ Ring :: riak_core_ring()) -> [change()].
+
filter_changes(Changes, Ring) ->
orddict:filter(fun (Node, Change) ->
filter_changes_pred(Node, Change, Changes, Ring)
@@ -462,6 +770,11 @@ filter_changes(Changes, Ring) ->
Changes).
%% @private
+%% @doc Predicate function for {@link filter_changes/2}.
+-spec filter_changes_pred(Node :: node(),
+ Action :: action(), Changes :: [change()],
+ Ring :: riak_core_ring()) -> boolean().
+
filter_changes_pred(Node, {Change, NewNode}, Changes,
Ring)
when (Change == replace) or (Change == force_replace) ->
@@ -477,13 +790,20 @@ filter_changes_pred(Node, _, _, Ring) ->
IsMember.
%% @private
+%% @doc Compute nodes staged to replace another node.
+-spec existing_replacements(Changes ::
+ [change()]) -> [node()].
+
existing_replacements(Changes) ->
[Node
|| {_, {Change, Node}} <- Changes,
(Change == replace) or (Change == force_replace)].
%% @private
-%% Determine if two rings have logically equal cluster state
+%% @doc Determine if two rings have logically equal cluster state.
+-spec same_plan(RingA :: riak_core_ring(),
+ RingB :: riak_core_ring()) -> boolean().
+
same_plan(RingA, RingB) ->
riak_core_ring:all_member_status(RingA) ==
riak_core_ring:all_member_status(RingB)
@@ -494,11 +814,19 @@ same_plan(RingA, RingB) ->
riak_core_ring:pending_changes(RingA) ==
riak_core_ring:pending_changes(RingB).
+%% @private
+%% @doc Schedule a tick to be sent to the claimant.
+-spec schedule_tick() -> reference().
+
schedule_tick() ->
Tick = application:get_env(riak_core, claimant_tick,
10000),
erlang:send_after(Tick, ?MODULE, tick).
+%% @private
+%% @doc Execute one claimant tick.
+-spec tick(State :: state()) -> state().
+
tick(State = #state{last_ring_id = LastID}) ->
case riak_core_ring_manager:get_ring_id() of
LastID -> schedule_tick(), State;
@@ -509,6 +837,12 @@ tick(State = #state{last_ring_id = LastID}) ->
State#state{last_ring_id = RingID}
end.
+%% @private
+%% @doc Force a ring update if this is the ring's claimant and the ring is
+%% ready.
+-spec maybe_force_ring_update(Ring ::
+ riak_core_ring()) -> ok.
+
maybe_force_ring_update(Ring) ->
IsClaimant = riak_core_ring:claimant(Ring) == node(),
IsReady = riak_core_ring:ring_ready(Ring),
@@ -521,6 +855,11 @@ maybe_force_ring_update(Ring) ->
false -> ok
end.
+%% @private
+%% @doc Force the ring update.
+-spec do_maybe_force_ring_update(Ring ::
+ riak_core_ring()) -> ok.
+
do_maybe_force_ring_update(Ring) ->
case compute_next_ring([], erlang:timestamp(), Ring) of
{ok, NextRing} ->
@@ -538,10 +877,27 @@ do_maybe_force_ring_update(Ring) ->
%% =========================================================================
%% @private
+%% @doc Compute a list of all next rings after applying the changes.
+-spec compute_all_next_rings(Changes :: [change()],
+ Seed :: erlang:timestamp(),
+ Ring :: riak_core_ring()) -> {ok,
+ [ring_transition()]} |
+ {error,
+ invalid_resize_claim}.
+
compute_all_next_rings(Changes, Seed, Ring) ->
compute_all_next_rings(Changes, Seed, Ring, []).
%% @private
+%% @doc Compute a list of all next rings after applying the changes.
+-spec compute_all_next_rings(Changes :: [change()],
+ Seed :: erlang:timestamp(),
+ Ring :: riak_core_ring(),
+ Acc :: [ring_transition()]) -> {ok,
+ [ring_transition()]} |
+ {error,
+ invalid_resize_claim}.
+
compute_all_next_rings(Changes, Seed, Ring, Acc) ->
case compute_next_ring(Changes, Seed, Ring) of
{error, invalid_resize_claim} = Err -> Err;
@@ -556,6 +912,13 @@ compute_all_next_rings(Changes, Seed, Ring, Acc) ->
end.
%% @private
+%% @doc Compute the next ring by applying all staged changes.
+-spec compute_next_ring(Changes :: [change()],
+ Seed :: erlang:timestamp(),
+ Ring :: riak_core_ring()) -> {ok, riak_core_ring()} |
+ {error,
+ invalid_resize_claim}.
+
compute_next_ring(Changes, Seed, Ring) ->
Replacing = [{Node, NewNode}
|| {Node, {replace, NewNode}} <- Changes],
@@ -570,6 +933,13 @@ compute_next_ring(Changes, Seed, Ring) ->
end.
%% @private
+%% @doc Return the resized ring if it is valid.
+-spec maybe_compute_resize(Orig :: riak_core_ring(),
+ MbResized :: riak_core_ring()) -> {true,
+ riak_core_ring()} |
+ {false,
+ riak_core_ring()}.
+
maybe_compute_resize(Orig, MbResized) ->
OrigSize = riak_core_ring:num_partitions(Orig),
NewSize = riak_core_ring:num_partitions(MbResized),
@@ -586,6 +956,9 @@ maybe_compute_resize(Orig, MbResized) ->
%% to determine the future ring but the changes are applied to
%% the currently installed ring (`Orig') so that the changes to
%% the chash are not committed to the ring manager
+-spec compute_resize(Orig :: riak_core_ring(),
+ Resized :: riak_core_ring()) -> riak_core_ring().
+
compute_resize(Orig, Resized) ->
%% need to operate on balanced, future ring (apply changes determined by claim)
CState0 = riak_core_ring:future_ring(Resized),
@@ -619,6 +992,13 @@ compute_resize(Orig, Resized) ->
%% @doc determine the first resize transfer a partition should perform with
%% the goal of ensuring the transfer will actually have data to send to the
%% target.
+-spec schedule_first_resize_transfer(Type :: smaller |
+ atom(),
+ IdxOwner :: {integer(), node()},
+ Owner :: node(),
+ Resized ::
+ riak_core_ring()) -> riak_core_ring().
+
schedule_first_resize_transfer(smaller,
{Idx, _} = IdxOwner, none, Resized) ->
%% partition no longer exists in shrunk ring, first successor will be
@@ -642,8 +1022,13 @@ schedule_first_resize_transfer(_,
riak_core_ring:schedule_resize_transfer(Resized,
IdxOwner, {Idx, NextOwner}).
+%% @private
%% @doc verify that resized ring was properly claimed (no owners are the dummy
%% resized owner) in both the current and future ring
+-spec validate_resized_ring(Ring ::
+ riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
validate_resized_ring(Ring) ->
FutureRing = riak_core_ring:future_ring(Ring),
Owners = riak_core_ring:all_owners(Ring),
@@ -662,6 +1047,10 @@ validate_resized_ring(Ring) ->
end.
%% @private
+%% @doc Apply the given changes to the ring.
+-spec apply_changes(Ring :: riak_core_ring(),
+ Changes :: [change()]) -> riak_core_ring().
+
apply_changes(Ring, Changes) ->
NewRing = lists:foldl(fun ({Node, Cmd}, RingAcc2) ->
RingAcc3 = change({Cmd, Node}, RingAcc2),
@@ -671,6 +1060,10 @@ apply_changes(Ring, Changes) ->
NewRing.
%% @private
+%% @doc Apply a change to the ring.
+-spec change(Change :: {action(), node()},
+ Ring :: riak_core_ring()) -> riak_core_ring().
+
change({join, Node}, Ring) ->
Ring2 = riak_core_ring:add_member(Node, Ring, Node),
Ring2;
@@ -708,6 +1101,11 @@ change({abort_resize, _Node}, Ring) ->
riak_core_ring:set_pending_resize_abort(Ring).
%%noinspection ErlangUnboundVariable
+%% @private
+%% @doc Update claimant with changed ring.
+-spec internal_ring_changed(Node :: node(),
+ CState :: riak_core_ring()) -> riak_core_ring().
+
internal_ring_changed(Node, CState) ->
{Changed, CState5} = do_claimant(Node, CState,
fun log/2),
@@ -771,6 +1169,12 @@ internal_ring_changed(Node, CState) ->
false -> CState5
end.
+%% @private
+%% @doc Tell newly exiting nodes to shut down.
+-spec inform_removed_nodes(Node :: node(),
+ OldRing :: riak_core_ring(),
+ NewRing :: riak_core_ring()) -> ok.
+
inform_removed_nodes(Node, OldRing, NewRing) ->
CName = riak_core_ring:cluster_name(NewRing),
Exiting = riak_core_ring:members(OldRing, [exiting]) --
@@ -785,13 +1189,36 @@ inform_removed_nodes(Node, OldRing, NewRing) ->
|| ExitingNode <- Changed],
ok.
+%% @private
+%% @doc Do claimant without logging.
+%% @see do_claimant/3.
+-spec do_claimant_quiet(Node :: node(),
+ CState :: riak_core_ring(),
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp()) -> {boolean(),
+ riak_core_ring()}.
+
do_claimant_quiet(Node, CState, Replacing, Seed) ->
do_claimant(Node, CState, Replacing, Seed,
fun no_log/2).
+%% @private
+%% @doc Rebalance the ring.
+-spec do_claimant(Node :: node(),
+ CState :: riak_core_ring(), Log :: log()) -> {boolean(),
+ riak_core_ring()}.
+
do_claimant(Node, CState, Log) ->
do_claimant(Node, CState, [], erlang:timestamp(), Log).
+%% @private
+%% @doc Rebalance the ring.
+-spec do_claimant(Node :: node(),
+ CState :: riak_core_ring(),
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(), Log :: log()) -> {boolean(),
+ riak_core_ring()}.
+
do_claimant(Node, CState, Replacing, Seed, Log) ->
AreJoining = are_joining_nodes(CState),
{C1, CState2} = maybe_update_claimant(Node, CState),
@@ -811,6 +1238,11 @@ do_claimant(Node, CState, Replacing, Seed, Log) ->
{Changed, CState5}.
%% @private
+%% @doc Set a new claimant on the ring if necessary.
+-spec maybe_update_claimant(Node :: node(),
+ CState :: riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_update_claimant(Node, CState) ->
Members = riak_core_ring:members(CState,
[valid, leaving]),
@@ -829,6 +1261,13 @@ maybe_update_claimant(Node, CState) ->
end.
%% @private
+%% @doc Update the ring if the conditions are right.
+-spec maybe_update_ring(Node :: node(),
+ CState :: riak_core_ring(),
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(), Log :: log()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_update_ring(Node, CState, Replacing, Seed, Log) ->
Claimant = riak_core_ring:claimant(CState),
case Claimant of
@@ -851,6 +1290,11 @@ maybe_update_ring(Node, CState, Replacing, Seed, Log) ->
end.
%% @private
+%% @doc Set nodes as invalid on the ring that are exiting.
+-spec maybe_remove_exiting(Node :: node(),
+ CState :: riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_remove_exiting(Node, CState) ->
Claimant = riak_core_ring:claimant(CState),
case Claimant of
@@ -876,11 +1320,19 @@ maybe_remove_exiting(Node, CState) ->
end.
%% @private
+%% @doc Check if there are nodes joining the ring.
+-spec are_joining_nodes(CState ::
+ riak_core_ring()) -> boolean().
+
are_joining_nodes(CState) ->
Joining = riak_core_ring:members(CState, [joining]),
Joining /= [].
%% @private
+%% @doc Compute all auto-joining nodes.
+-spec auto_joining_nodes(CState ::
+ riak_core_ring()) -> [node()].
+
auto_joining_nodes(CState) ->
Joining = riak_core_ring:members(CState, [joining]),
%% case application:get_env(riak_core, staged_joins, true) of false -> Joining; true ->
@@ -892,16 +1344,32 @@ auto_joining_nodes(CState) ->
true].%% end.
%% @private
+%% @doc Handle join of all auto-joining nodes.
+-spec maybe_handle_auto_joining(Node :: node(),
+ CState :: riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_handle_auto_joining(Node, CState) ->
Auto = auto_joining_nodes(CState),
maybe_handle_joining(Node, Auto, CState).
%% @private
+%% @doc Handle join of joining nodes.
+-spec maybe_handle_joining(Node :: node(),
+ CState :: riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_handle_joining(Node, CState) ->
Joining = riak_core_ring:members(CState, [joining]),
maybe_handle_joining(Node, Joining, CState).
%% @private
+%% @doc Add joining nodes as valid members to the ring if possible.
+-spec maybe_handle_joining(Node :: node(),
+ Joining :: [node()],
+ CState :: riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_handle_joining(Node, Joining, CState) ->
Claimant = riak_core_ring:claimant(CState),
case Claimant of
@@ -918,6 +1386,13 @@ maybe_handle_joining(Node, Joining, CState) ->
end.
%% @private
+%% @doc Apply changes to the ring.
+-spec update_ring(CNode :: node(),
+ CState :: riak_core_ring(),
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(), Log :: log(),
+ Resizing :: boolean()) -> {boolean(), riak_core_ring()}.
+
update_ring(CNode, CState, Replacing, Seed, Log,
false) ->
Next0 = riak_core_ring:pending_changes(CState),
@@ -989,6 +1464,12 @@ update_ring(CNode, CState, _Replacing, _Seed, _Log,
false -> {false, CState}
end.
+%% @private
+%% @doc Install the ring if the resize process is completed.
+-spec maybe_install_resized_ring(CState ::
+ riak_core_ring()) -> {boolean(),
+ riak_core_ring()}.
+
maybe_install_resized_ring(CState) ->
case riak_core_ring:is_resize_complete(CState) of
true -> {true, riak_core_ring:future_ring(CState)};
@@ -996,6 +1477,10 @@ maybe_install_resized_ring(CState) ->
end.
%% @private
+%% @doc Assign partitions to new owners.
+-spec transfer_ownership(CState :: riak_core_ring(),
+ Log :: log()) -> {boolean(), riak_core_ring()}.
+
transfer_ownership(CState, Log) ->
Next = riak_core_ring:pending_changes(CState),
%% Remove already completed and transfered changes
@@ -1028,6 +1513,12 @@ transfer_ownership(CState, Log) ->
{Changed, CState3}.
%% @private
+%% @doc Assign indices owned by replaced nodes to the one replacing them.
+-spec reassign_indices(CState :: riak_core_ring(),
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(), Log :: log()) -> {boolean(),
+ riak_core_ring()}.
+
reassign_indices(CState, Replacing, Seed, Log) ->
Next = riak_core_ring:pending_changes(CState),
Invalid = riak_core_ring:members(CState, [invalid]),
@@ -1054,10 +1545,19 @@ reassign_indices(CState, Replacing, Seed, Log) ->
{RingChanged or NextChanged, CState3}.
%% @private
+-spec rebalance_ring(CNode :: node(),
+ CState :: riak_core_ring()) -> next().
+
rebalance_ring(CNode, CState) ->
Next = riak_core_ring:pending_changes(CState),
rebalance_ring(CNode, Next, CState).
+%% @private
+%% @doc Run the claim algorithm and compute the differing indices with old and
+%% new owners.
+-spec rebalance_ring(CNode :: node(), Next :: next(),
+ CState :: riak_core_ring()) -> next().
+
rebalance_ring(_CNode, [], CState) ->
CState2 = riak_core_claim:claim(CState),
Owners1 = riak_core_ring:all_owners(CState),
@@ -1070,6 +1570,10 @@ rebalance_ring(_CNode, [], CState) ->
rebalance_ring(_CNode, Next, _CState) -> Next.
%% @private
+%% @doc Compute the list of indices owned by down nodes and their replacements.
+-spec handle_down_nodes(CState :: riak_core_ring(),
+ Next :: next()) -> next().
+
handle_down_nodes(CState, Next) ->
LeavingMembers = riak_core_ring:members(CState,
[leaving, invalid]),
@@ -1093,6 +1597,11 @@ handle_down_nodes(CState, Next) ->
Next3.
%% @private
+%% @doc Assigns all indices owned by a given node to a new node.
+-spec reassign_indices_to(Node :: node(),
+ NewNode :: node(),
+ Ring :: riak_core_ring()) -> riak_core_ring().
+
reassign_indices_to(Node, NewNode, Ring) ->
Indices = riak_core_ring:indices(Ring, Node),
Reassign = [{Idx, NewNode} || Idx <- Indices],
@@ -1100,6 +1609,13 @@ reassign_indices_to(Node, NewNode, Ring) ->
Ring2.
%% @private
+%% @doc Remove a node and compute replacements.
+-spec remove_node(CState :: riak_core_ring(),
+ Node :: node(), Status :: invalid | leaving,
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(),
+ Log :: log()) -> riak_core_ring().
+
remove_node(CState, Node, Status, Replacing, Seed,
Log) ->
Indices = riak_core_ring:indices(CState, Node),
@@ -1107,6 +1623,13 @@ remove_node(CState, Node, Status, Replacing, Seed,
Indices).
%% @private
+%% @doc Remove a node and compute replacements.
+-spec remove_node(CState :: riak_core_ring(),
+ Node :: node(), Status :: invalid | leaving,
+ Replacing :: orddict:orddict(node(), node()),
+ Seed :: erlang:timestamp(), Log :: log(),
+ Indices :: [integer()]) -> riak_core_ring().
+
remove_node(CState, _Node, _Status, _Replacing, _Seed,
_Log, []) ->
CState;
@@ -1148,6 +1671,13 @@ remove_node(CState, Node, Status, Replacing, Seed, Log,
Next2),
CState3.
+%% @private
+%% @doc Replace a node while respecting an ongoing resize operation.
+-spec replace_node_during_resize(CState0 ::
+ riak_core_ring(),
+ Node :: node(),
+ NewNode :: node()) -> riak_core_ring().
+
replace_node_during_resize(CState0, Node, NewNode) ->
PostResize = riak_core_ring:is_post_resize(CState0),
CState1 = replace_node_during_resize(CState0, Node,
@@ -1155,6 +1685,13 @@ replace_node_during_resize(CState0, Node, NewNode) ->
riak_core_ring:increment_ring_version(riak_core_ring:claimant(CState1),
CState1).
+%% @private
+%% @doc Replace a node while respecting an ongoing resize operation.
+-spec replace_node_during_resize(CStat0 ::
+ riak_core_ring(),
+ Node :: node(), NewNode :: node(),
+ PostResize :: boolean()) -> riak_core_ring().
+
replace_node_during_resize(CState0, Node, NewNode,
false) -> %% ongoing xfers
%% for each of the indices being moved from Node to NewNode, reschedule resize
@@ -1183,8 +1720,15 @@ replace_node_during_resize(CState, Node, _NewNode,
N =/= Node],
riak_core_ring:set_pending_changes(CState, NewNext).
+-spec no_log(any(), any()) -> ok.
+
no_log(_, _) -> ok.
+-spec log(Type :: debug | ownership | reassign | next |
+ any(),
+ {Idx :: integer(), NewOwner :: node(),
+ CState :: riak_core_ring()}) -> ok.
+
log(debug, {Msg, Args}) -> logger:debug(Msg, Args);
log(ownership, {Idx, NewOwner, CState}) ->
Owner = riak_core_ring:index_owner(CState, Idx),
@@ -1198,7 +1742,3 @@ log(next, {Idx, Owner, NewOwner}) ->
logger:debug("(pending) ~b :: ~p -> ~p~n",
[Idx, Owner, NewOwner]);
log(_, _) -> ok.
-
-%% ===================================================================
-%% EUnit tests
-%% ===================================================================
diff --git a/src/riak_core_handoff_listener.erl b/src/riak_core_handoff_listener.erl
index fb98c2eea..ec1a12249 100644
--- a/src/riak_core_handoff_listener.erl
+++ b/src/riak_core_handoff_listener.erl
@@ -24,7 +24,7 @@
-module(riak_core_handoff_listener).
--behavior(gen_nb_server).
+-behaviour(gen_nb_server).
-export([start_link/0]).
@@ -37,6 +37,15 @@
-record(state,
{ipaddr :: string(), portnum :: integer()}).
+-type state() :: #state{}.
+
+-type sock_opts() :: binary | {packet, integer()} |
+ {reuseaddr, boolean()} | {backlog, integer()}.
+
+%% @doc Start the handoff listener listening on the configured IP and port.
+%% @see gen_nb_server:start_link/4.
+-spec start_link() -> {ok, pid()} | {error, any()}.
+
start_link() ->
PortNum = application:get_env(riak_core, handoff_port,
undefined),
@@ -45,16 +54,39 @@ start_link() ->
gen_nb_server:start_link(?MODULE, IpAddr, PortNum,
[IpAddr, PortNum]).
+%% @doc Return the IP address this server is listening to.
+-spec get_handoff_ip() -> string().
+
get_handoff_ip() ->
gen_server:call(?MODULE, handoff_ip, infinity).
+%% @doc Callback for {@link gen_nb_server:start_link/4}. Sets the IP address and
+%% port number in the state.
+%% @param Params List of parameters. Takes two elements: `IpAddr :: string()' and
+%% `PortNum :: integer()'.
+%% @returns `{ok, State}'
+-spec init(Params :: [any()]) -> {ok, state()}.
+
init([IpAddr, PortNum]) ->
register(?MODULE, self()),
{ok, #state{portnum = PortNum, ipaddr = IpAddr}}.
+%% @doc Socket options.
+%% @returns Current socket options. Currently they are fixed with
+%% `[binary, {packet, 4}, {reuseaddr, true}, {backlog, 64}]'.
+-spec sock_opts() -> [sock_opts()].
+
sock_opts() ->
[binary, {packet, 4}, {reuseaddr, true}, {backlog, 64}].
+%% @doc Callback for {@link gen_nb_server:call/3}.
+-spec handle_call(Msg :: handoff_ip | handoff_port,
+ From :: {pid(), term()}, State :: state()) -> {reply,
+ {ok,
+ string() |
+ integer()},
+ state()}.
+
handle_call(handoff_ip, _From,
State = #state{ipaddr = I}) ->
{reply, {ok, I}, State};
@@ -62,14 +94,38 @@ handle_call(handoff_port, _From,
State = #state{portnum = P}) ->
{reply, {ok, P}, State}.
+%% @doc Callback for {@link gen_nb_server:cast/2}. Not implemented.
+-spec handle_cast(Msg :: term(),
+ State :: state()) -> {noreply, state()}.
+
handle_cast(_Msg, State) -> {noreply, State}.
+%% @doc Callback for {@link gen_nb_server}. Not implemented.
+-spec handle_info(Info :: term(),
+ State :: state()) -> {noreply, state()}.
+
handle_info(_Info, State) -> {noreply, State}.
+%% @doc Callback for {@link gen_nb_server}. Not implemented.
+-spec terminate(Reason :: term(),
+ State :: state()) -> ok.
+
terminate(_Reason, _State) -> ok.
+%% @doc Callback for {@link gen_nb_server}. Not implemented.
+-spec code_change(OldVsn :: term(), State :: state(),
+ Extra :: term()) -> {ok, state()}.
+
code_change(_OldVsn, State, _Extra) -> {ok, State}.
+%% @doc Try opening a new inbound connection. If it cannot be opened, close the
+%% socket. Otherwise set the socket for {@link riak_core_handoff_receiver}.
+%% @param Socket Socket the new connection is requested on.
+%% @param State Current state.
+%% @returns `{ok, State}'.
+-spec new_connection(Socket :: inet:socket(),
+ State :: state()) -> {ok, state()}.
+
new_connection(Socket, State) ->
case riak_core_handoff_manager:add_inbound() of
{ok, Pid} ->
diff --git a/src/riak_core_handoff_listener_sup.erl b/src/riak_core_handoff_listener_sup.erl
index ed7f87126..40b5b6f9f 100644
--- a/src/riak_core_handoff_listener_sup.erl
+++ b/src/riak_core_handoff_listener_sup.erl
@@ -29,11 +29,29 @@
{I, {I, start_link, []}, permanent, brutal_kill, Type,
[I]}).
-%% begins the supervisor, init/1 will be called
+%% @doc Begin the supervisor, init/1 will be called
+%% @see supervisor:start_link/3.
+-spec start_link() -> {ok, pid()} |
+ {error,
+ {already_started, pid()} | {shutdown | reason} |
+ term()} |
+ ignore.
+
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% @private
+%% @doc Callback for {@link supervisor:start_link/3}. Starts the
+%% {@link riak_core_handoff_listener} as its supervised child.
+%% @see riak_core_handoff_listener:start_link/0.
+%% @returns Parameters to start the supervised child.
+-spec init([]) -> {ok,
+ {{one_for_one, 10, 10},
+ [{riak_core_handoff_listener,
+ {riak_core_handoff_listener, start_link, []}, permanent,
+ brutal_kill, worker,
+ [riak_core_handoff_listener]}, ...]}}.
+
init([]) ->
{ok,
{{one_for_one, 10, 10},
diff --git a/src/riak_core_handoff_manager.erl b/src/riak_core_handoff_manager.erl
index e023a06f7..d563b60da 100644
--- a/src/riak_core_handoff_manager.erl
+++ b/src/riak_core_handoff_manager.erl
@@ -47,6 +47,8 @@
-record(state,
{excl, handoffs = [] :: [handoff_status()]}).
+-type state() :: #state{}.
+
%% this can be overridden with riak_core handoff_concurrency
-define(HANDOFF_CONCURRENCY, 2).
@@ -61,18 +63,60 @@
%%% API
%%%===================================================================
+%% @doc Start the handoff manager server.
+%% @see gen_server:start_link/4.
+-spec start_link() -> {ok, pid()} | ignore |
+ {error, term()}.
+
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [],
[]).
+%% @doc Callback for {@link gen_server:start_link/4}. The initial state has no
+%% exclusions or handoffs set.
+-spec init([]) -> {ok, state()}.
+
init([]) ->
{ok, #state{excl = sets:new(), handoffs = []}}.
+%% @doc Like {@link add_outbound/7} where source and target index are the same.
+%% @param HOType Handoff type to add.
+%% @param Module VNode module handling the handoff.
+%% @param Idx Index to hand off.
+%% @param Node Node owning the new target index.
+%% @param VnodePid Process id of the node handing off the index.
+%% @param Opts List of handoff options.
+%% @returns `{ok, Sender}' if the handoff was added successfully,
+%% `{error, max_concurrency}' if no more concurrent handoffs can be
+%% added.
+-spec add_outbound(HOType :: ho_type(),
+ Module :: module(), Idx :: index(), Node :: node(),
+ VnodePid :: pid(), Opts :: [term()]) -> {ok, pid()} |
+ {error,
+ max_concurrency}.
+
add_outbound(HOType, Module, Idx, Node, VnodePid,
Opts) ->
add_outbound(HOType, Module, Idx, Idx, Node, VnodePid,
Opts).
+%% @doc Add an outbound handoff from the source index to the target index.
+%% @param HOType Handoff type to add.
+%% @param Module VNode module handling the handoff.
+%% @param SrcIdx Index to hand off.
+%% @param TargetIdx Index to hand off to.
+%% @param Node Node owning the new target index.
+%% @param VnodePid Process id of the node handing off the index.
+%% @param Opts List of handoff options.
+%% @returns `{ok, Sender}' if the handoff was added successfully,
+%% `{error, max_concurrency}' if no more concurrent handoffs can be
+%% added.
+-spec add_outbound(HOType :: ho_type(),
+ Module :: module(), SrcIdx :: index(),
+ TargetIdx :: index(), Node :: node(), VnodePid :: pid(),
+ Opts :: [{atom(), term()}]) -> {ok, pid()} |
+ {error, max_concurrency}.
+
add_outbound(HOType, Module, SrcIdx, TargetIdx, Node,
VnodePid, Opts) ->
case application:get_env(riak_core,
@@ -86,6 +130,12 @@ add_outbound(HOType, Module, SrcIdx, TargetIdx, Node,
infinity)
end.
+%% @doc Add an inbound handoff. Starts a receiver process.
+%% @returns `{ok, Receiver}' if the receiver could be started,
+%% `{error, max_concurrency}' if no additional handoffs can be handled.
+-spec add_inbound() -> {ok, pid()} |
+ {error, max_concurrency}.
+
add_inbound() ->
case application:get_env(riak_core,
disable_inbound_handoff)
@@ -96,8 +146,13 @@ add_inbound() ->
%% @doc Initiate a transfer from `SrcPartition' to `TargetPartition'
%% for the given `Module' using the `FilterModFun' filter.
--spec xfer({index(), node()}, mod_partition(),
- {module(), atom()}) -> ok.
+%% @param PartitionOwner Tuple of the source partition index and owner.
+%% @param ModPartitions Tuple of target module and partition index.
+%% @param FilterModFun Module and function name of a filter function to use.
+%% @returns `ok'.
+-spec xfer(PartitionOwner :: {index(), node()},
+ ModPartitions :: mod_partition(),
+ FilterModFun :: {module(), atom()}) -> ok.
xfer({SrcPartition, SrcOwner},
{Module, TargetPartition}, FilterModFun) ->
@@ -109,41 +164,81 @@ xfer({SrcPartition, SrcOwner},
FilterModFun}).
%% @doc Associate `Data' with the inbound handoff `Recv'.
--spec set_recv_data(pid(), proplists:proplist()) -> ok.
+%% @param Recv Process ID of the handoff receiver.
+%% @param Data Data to associate with the receiver.
+%% @returns `ok'.
+-spec set_recv_data(Recv :: pid(),
+ Data :: proplists:proplist()) -> ok.
set_recv_data(Recv, Data) ->
gen_server:call(?MODULE, {set_recv_data, Recv, Data},
infinity).
+%% @doc Get the list of all handoff status.
+%% @returns All handoff_status in the current state.
+-spec status() -> [handoff_status()].
+
status() -> status(none).
+%% @doc Get the list of all handoff status containing the given key-value pair.
+%% @param Filter A key-value pair that is necessary to consider a handoff status
+%% part of the status. If `none' is given, nothing is filtered out.
+%% @returns The filtered list of handoff status.
+-spec status(Filter :: none |
+ {term(), term()}) -> [handoff_status()].
+
status(Filter) ->
gen_server:call(?MODULE, {status, Filter}, infinity).
%% @doc Send status updates `Stats' to the handoff manager for a
%% particular handoff identified by `ModSrcTgt'.
--spec status_update(mod_src_tgt(), ho_stats()) -> ok.
+%% @param ModSrcTgt Module, source and target index identifying the handoff.
+%% @param Stats Handoff stats.
+%% @returns `ok'.
+-spec status_update(ModSrcTgt :: mod_src_tgt(),
+ Stats :: ho_stats()) -> ok.
status_update(ModSrcTgt, Stats) ->
gen_server:cast(?MODULE,
{status_update, ModSrcTgt, Stats}).
+%% @doc Set a new limit of concurrent handoffs. If the limit is less than the
+%% current number of concurrent handoffs, some are discarded.
+%% @param Limit Maximum number of concurrent handoffs.
+%% @returns `ok'.
+-spec set_concurrency(Limit :: integer()) -> ok.
+
set_concurrency(Limit) ->
gen_server:call(?MODULE, {set_concurrency, Limit},
infinity).
+-spec get_concurrency() -> integer().
+
get_concurrency() ->
gen_server:call(?MODULE, get_concurrency, infinity).
%% @doc Kill the transfer of `ModSrcTarget' with `Reason'.
--spec kill_xfer(node(), tuple(), any()) -> ok.
+%% @param SrcNode Node requesting to kill the transfer.
+%% @param ModSrcTarget Tuple of module, source and target index that identifies
+%% the handoff.
+%% @param Reason Term giving a reason for the termination.
+%% @returns `ok'.
+-spec kill_xfer(SrcNode :: node(),
+ ModSrcTarget :: mod_src_tgt(), Reason :: any()) -> ok.
kill_xfer(SrcNode, ModSrcTarget, Reason) ->
gen_server:cast({?MODULE, SrcNode},
{kill_xfer, ModSrcTarget, Reason}).
+%% @doc Kill all handoffs.
+%% @returns `ok'.
+-spec kill_handoffs() -> ok.
+
kill_handoffs() -> set_concurrency(0).
+%% @doc Kill all handoffs in the given direction.
+%% @param Direction Determines if `inbound' or `outbound' handoffs are killed.
+%% @returns `ok'.
-spec kill_handoffs_in_direction(inbound |
outbound) -> ok.
@@ -151,14 +246,34 @@ kill_handoffs_in_direction(Direction) ->
gen_server:call(?MODULE, {kill_in_direction, Direction},
infinity).
+%% @doc Add a handoff exclusion for a given module and source index.
+%% @param Module Module to add the exception for.
+%% @param Index Index to add the exception for.
+%% @returns `ok'.
+-spec add_exclusion(Module :: module(),
+ Index :: index()) -> ok.
+
add_exclusion(Module, Index) ->
gen_server:cast(?MODULE,
{add_exclusion, {Module, Index}}).
+%% @doc Remove a handoff exclusion for the given module and index.
+%% @param Module Module identifying the exclusion.
+%% @param Index Index identifying the exclusion.
+%% @returns `ok'.
+-spec remove_exclusion(Module :: module(),
+ Index :: index()) -> ok.
+
remove_exclusion(Module, Index) ->
gen_server:cast(?MODULE,
{del_exclusion, {Module, Index}}).
+%% @doc Get all indices for which an exclusion on a module exists.
+%% @param Module Module to get exclusions for.
+%% @returns `{ok, Indices}' where `Indices' is the list of excluded indices.
+-spec get_exclusions(Module :: module()) -> {ok,
+ [index()]}.
+
get_exclusions(Module) ->
gen_server:call(?MODULE, {get_exclusions, Module},
infinity).
@@ -167,6 +282,20 @@ get_exclusions(Module) ->
%%% Callbacks
%%%===================================================================
+%% @doc Callback for {@link gen_server:call/3}.
+-spec handle_call(Msg :: {get_exclusions, module()} |
+ {add_outbound, ho_type(), module(), index(), index(),
+ node(), pid(), [{atom(), term()}]} |
+ {add_inbound} |
+ {set_recv_data, pid(), proplists:proplist()} |
+ {xfer_status, handoff_status()} |
+ {status, none | {term(), term()}} |
+ {set_concurrency, integer()} | get_concurrency |
+ {kill_in_direction, inbound | outbound},
+ From :: {pid(), term()}, State :: state()) -> {reply,
+ term(),
+ state()}.
+
handle_call({get_exclusions, Module}, _From,
State = #state{excl = Excl}) ->
Reply = [I
@@ -262,6 +391,10 @@ handle_call({kill_in_direction, Direction}, _From,
|| #handoff_status{transport_pid = Pid} <- Kill],
{reply, ok, State}.
+%% @doc Callback for {@link gen_server:cast/2}.
+-spec handle_cast(Msg :: term(), state()) -> {noreply,
+ state()}.
+
handle_cast({del_exclusion, {Mod, Idx}},
State = #state{excl = Excl}) ->
Excl2 = sets:del_element({Mod, Idx}, Excl),
@@ -311,6 +444,13 @@ handle_cast({kill_xfer, ModSrcTarget, Reason}, State) ->
HS2 = kill_xfer_i(ModSrcTarget, Reason, HS),
{noreply, State#state{handoffs = HS2}}.
+%% @doc Callback for {@link gen_server} handling incoming messages that are not
+%% a call or cast.
+%% @returns `{noreply, State}'.
+-spec handle_info({'DOWN', reference(), process, pid(),
+ term()},
+ state()) -> {noreply, state()}.
+
handle_info({'DOWN', Ref, process, _Pid, Reason},
State = #state{handoffs = HS}) ->
case lists:keytake(Ref, #handoff_status.transport_mon,
@@ -384,14 +524,29 @@ handle_info({'DOWN', Ref, process, _Pid, Reason},
end
end.
+%% @doc Callback for {@link gen_server:stop/1}. Not implemented.
+-spec terminate(Reason :: term(),
+ State :: state()) -> ok.
+
terminate(_Reason, _State) -> ok.
+%% @doc Callback for {@link gen_server}. Not implemented.
+-spec code_change(OldVsn :: term(), State :: state(),
+ Extra :: term()) -> {ok, state()}.
+
code_change(_OldVsn, State, _Extra) -> {ok, State}.
%%%===================================================================
%%% Private
%%%===================================================================
+%% @private
+%% @doc Build a status list from a `handoff_status' record.
+%% @param HO Handoff status record.
+%% @returns `{status_v2, StatusEntries}'.
+-spec build_status(HO ::
+ handoff_status()) -> {status_v2, [{atom(), term()}]}.
+
build_status(HO) ->
#handoff_status{mod_src_tgt = {Mod, SrcP, TargetP},
src_node = SrcNode, target_node = TargetNode,
@@ -406,6 +561,14 @@ build_status(HO) ->
{sender_pid, TPid}, {stats, calc_stats(HO)},
{type, Type}]}.
+%% @private
+%% @doc Retrieve statistics from a handoff status.
+%% @param HO Handoff status.
+%% @returns List of statistics or `no_stats'.
+-spec calc_stats(HO :: handoff_status()) -> [{atom(),
+ term()}] |
+ no_stats.
+
calc_stats(#handoff_status{stats = Stats,
timestamp = StartTS, size = Size}) ->
case dict:find(last_update, Stats) of
@@ -424,12 +587,33 @@ calc_stats(#handoff_status{stats = Stats,
{size, CalcSize}, {pct_done_decimal, Done}]
end.
+%% @private
+%% @doc Get actual size from a size entry.
+-spec get_size(Size :: {function(), dynamic} |
+ {non_neg_integer(), bytes | objects}) -> {integer(),
+ bytes |
+ objects} |
+ undefined.
+
get_size({F, dynamic}) -> F();
get_size(S) -> S.
-calc_pct_done(_, _, undefined) -> undefined;
+%% @private
+%% @doc Calculate the completed fraction of a handoff, by objects or bytes.
+-spec calc_pct_done(Objs :: integer(),
+ Bytes :: integer(),
+ Size :: undefined |
+ {integer(), objects | bytes}) -> float() |
+ undefined.
+
calc_pct_done(Objs, _, {Size, objects}) -> Objs / Size;
-calc_pct_done(_, Bytes, {Size, bytes}) -> Bytes / Size.
+calc_pct_done(_, Bytes, {Size, bytes}) -> Bytes / Size;
+calc_pct_done(_, _, undefined) -> undefined.
+
+%% @private
+%% @doc Create a filter function from a key value pair.
+-spec filter(Filter :: none | {term(), term()}) -> fun(({status_v2,
+ handoff_status()}) -> boolean()).
filter(none) -> fun (_) -> true end;
filter({Key, Value} = _Filter) ->
@@ -440,6 +624,18 @@ filter({Key, Value} = _Filter) ->
end
end.
+%% @private
+%% @doc Generate a resize transfer filter function.
+%% @param Ring Ring affected by the transfer.
+%% @param Module Module involved in the handoff.
+%% @param Src Source index.
+%% @param Target Target index.
+%% @returns Filter function.
+-spec resize_transfer_filter(Ring ::
+ riak_core_ring:riak_core_ring(),
+ Module :: module(), Src :: index(),
+ Target :: index()) -> fun((term()) -> boolean()).
+
resize_transfer_filter(Ring, Module, Src, Target) ->
fun (K) ->
{_, Hashed} = Module:object_info(K),
@@ -447,6 +643,18 @@ resize_transfer_filter(Ring, Module, Src, Target) ->
Ring)
end.
+%% @private
+%% @doc Create a filter function that filters for unsent indices.
+%% @param Ring Ring the transfer takes place on.
+%% @param Module Module involved in the handoff.
+%% @param Src Source index of the handoffs.
+%% @returns Function filtering for unsent indices.
+-spec resize_transfer_notsent_fun(Ring ::
+ riak_core_ring:riak_core_ring(),
+ Module :: module(),
+ Src :: index()) -> fun((term(),
+ [index()]) -> boolean()).
+
resize_transfer_notsent_fun(Ring, Module, Src) ->
Shrinking = riak_core_ring:num_partitions(Ring) >
riak_core_ring:future_num_partitions(Ring),
@@ -462,6 +670,14 @@ resize_transfer_notsent_fun(Ring, Module, Src) ->
Module, Src, Key, Acc)
end.
+-spec record_seen_index(Ring ::
+ riak_core_ring:riak_core_ring(),
+ Shrinking :: boolean(),
+ NValMap :: [{term(), integer()}], DefaultN :: integer(),
+ Module :: module(), Src :: index(), Key :: term(),
+ Seen ::
+ ordsets:ordset(index())) -> ordsets:ordset(index()).
+
record_seen_index(Ring, Shrinking, NValMap, DefaultN,
Module, Src, Key, Seen) ->
{Bucket, Hashed} = Module:object_info(Key),
@@ -476,11 +692,19 @@ record_seen_index(Ring, Shrinking, NValMap, DefaultN,
FutureIndex -> ordsets:add_element(FutureIndex, Seen)
end.
+%% @private
+%% @doc Retrieve the maximum number of concurrent handoffs.
+-spec get_concurrency_limit() -> integer().
+
get_concurrency_limit() ->
application:get_env(riak_core, handoff_concurrency,
?HANDOFF_CONCURRENCY).
-%% true if handoff_concurrency (inbound + outbound) hasn't yet been reached
+%% @doc Check if the concurrency limit is reached.
+%% @returns `true' if the handoff_concurrency limit (inbound + outbound) has
+%% been reached, `false' otherwise.
+-spec handoff_concurrency_limit_reached() -> boolean().
+
handoff_concurrency_limit_reached() ->
Receivers =
supervisor:count_children(riak_core_handoff_receiver_sup),
@@ -492,24 +716,50 @@ handoff_concurrency_limit_reached() ->
get_concurrency_limit() =<
ActiveReceivers + ActiveSenders.
+%% @private
+%% @doc Like {@link send_handoff/8} without filters or an origin node.
+-spec send_handoff(HOType :: ho_type(),
+ ModSourceTarget :: {module(), index(), index()},
+ Node :: node(), Pid :: pid(), HS :: list(),
+ Opts :: [{atom(), term()}]) -> {ok, handoff_status()} |
+ {error, max_concurrency} |
+ {false, handoff_status()}.
+
send_handoff(HOType, ModSrcTarget, Node, Pid, HS,
Opts) ->
send_handoff(HOType, ModSrcTarget, Node, Pid, HS,
{none, none}, none, Opts).
%% @private
-%%
%% @doc Start a handoff process for the given `Mod' from
%% `Src'/`VNode' to `Target'/`Node' using the given `Filter'
%% function which is a predicate applied to the key. The
%% `Origin' is the node this request originated from so a reply
%% can't be sent on completion.
--spec send_handoff(ho_type(),
- {module(), index(), index()}, node(), pid(), list(),
- {predicate() | none, {module(), atom()} | none}, node(),
- [{atom(), term()}]) -> {ok, handoff_status()} |
- {error, max_concurrency} |
- {false, handoff_status()}.
+%% @param HOType Type of the handoff.
+%% @param MST Triple of the module, source index and target index the handoff
+%% affects.
+%% @param Node Target node of the handoff.
+%% @param Vnode Process id of the source node.
+%% @param HS List of handoff status.
+%% @param FilterTuple Tuple of filter predicate and filter function in a module.
+%% @param Origin Node requesting the handoff.
+%% @param Opts List of options for the handoff.
+%% @returns `{ok, NewHandoff}' if the handoff should happen,
+%% `{false, CurrentHandoff}' if the handoff should not happen,
+%% `{error, max_concurrency}' if the concurrency limit is reached.
+-spec send_handoff(HOType :: ho_type(),
+ MST :: {Mod :: module(), Src :: index(),
+ Target :: index()},
+ Node :: node(), Vnode :: pid(), HS :: list(),
+ FilterTuple :: {Filter :: predicate() | none,
+ FilterModFun :: {module(), atom()} | none},
+ Origin :: node(), Opts :: [{atom(), term()}]) -> {ok,
+ handoff_status()} |
+ {error,
+ max_concurrency} |
+ {false,
+ handoff_status()}.
send_handoff(HOType, {Mod, Src, Target}, Node, Vnode,
HS, {Filter, FilterModFun}, Origin, Opts) ->
@@ -579,7 +829,12 @@ send_handoff(HOType, {Mod, Src, Target}, Node, Vnode,
end
end.
-%% spawn a receiver process
+%% @doc Spawn a receiver process.
+%% @returns `{ok, Status}' if the handoff receiver could be started,
+%% `{error, max_concurrency}' if the concurrency limit is reached.
+-spec receive_handoff() -> {ok, handoff_status()} |
+ {error, max_concurrency}.
+
receive_handoff() ->
case handoff_concurrency_limit_reached() of
true -> {error, max_concurrency};
@@ -597,6 +852,15 @@ receive_handoff() ->
status = [], stats = dict:new(), req_origin = none}}
end.
+%% @private
+%% @doc Update a stats dictionary with a new stats record.
+%% @param StatsUpdate handoff stats record containing new information.
+%% @param Stats Stats dictionary to update.
+%% @returns Updated stats dictionary.
+-spec update_stats(StatsUpdate :: ho_stats(),
+ Stats :: dict:dict(term(), term())) -> dict:dict(term(),
+ term()).
+
update_stats(StatsUpdate, Stats) ->
#ho_stats{last_update = LU, objs = Objs,
bytes = Bytes} =
@@ -605,6 +869,16 @@ update_stats(StatsUpdate, Stats) ->
Stats3 = dict:update_counter(bytes, Bytes, Stats2),
dict:store(last_update, LU, Stats3).
+%% @private
+%% @doc Check if a size object is valid, i.e. for objects and bytes the integer
+%% is positive and for dynamic size the function is a function.
+%% @returns Validated size or `undefined'.
+-spec validate_size(Size :: {function(), dynamic} |
+ {integer(), bytes | objects}) -> function() |
+ {integer(),
+ bytes | objects} |
+ undefined.
+
validate_size(Size = {N, U})
when is_number(N) andalso
N > 0 andalso (U =:= bytes orelse U =:= objects) ->
@@ -615,10 +889,17 @@ validate_size(Size = {F, dynamic})
validate_size(_) -> undefined.
%% @private
-%%
%% @doc Kill and remove _each_ xfer associated with `ModSrcTarget'
%% with `Reason'. There might be more than one because repair
%% can have two simultaneous inbound xfers.
+%% @param ModSrcTarget Triple of module, source and target index to identify the
+%% handoff.
+%% @param Reason Reason to kill the transfer.
+%% @param HS Handoff status to remove the transfer from.
+%% @returns Handoff status with the transfer removed.
+-spec kill_xfer_i(ModSrcTarget :: mod_src_tgt(),
+ Reason :: term(), HS :: [tuple()]) -> [tuple()].
+
kill_xfer_i(ModSrcTarget, Reason, HS) ->
case lists:keytake(ModSrcTarget,
#handoff_status.mod_src_tgt, HS)
@@ -643,6 +924,17 @@ kill_xfer_i(ModSrcTarget, Reason, HS) ->
kill_xfer_i(ModSrcTarget, Reason, HS2)
end.
+%% @private
+%% @doc Change the application setting to enable or disable handoffs in the
+%% given direction.
+%% @param EnOrDis Enable or disable handoffs.
+%% @param Direction Direction of the handoffs to enable or disable.
+%% @returns `ok'.
+-spec handoff_change_enabled_setting(EnOrDis :: enable |
+ disable,
+ Direction :: inbound | outbound |
+ both) -> ok.
+
handoff_change_enabled_setting(EnOrDis, Direction) ->
SetFun = case EnOrDis of
enable -> fun handoff_enable/1;
@@ -654,6 +946,12 @@ handoff_change_enabled_setting(EnOrDis, Direction) ->
both -> SetFun(inbound), SetFun(outbound)
end.
+%% @private
+%% @doc Enable handoffs in the given direction.
+%% @param Direction Enable inbound or outbound handoffs.
+-spec handoff_enable(Direction :: inbound |
+ outbound) -> ok.
+
handoff_enable(inbound) ->
application:set_env(riak_core, disable_inbound_handoff,
false);
@@ -661,6 +959,13 @@ handoff_enable(outbound) ->
application:set_env(riak_core, disable_outbound_handoff,
false).
+%% @private
+%% @doc Disable handoffs in the given direction.
+%% @param Direction Disable inbound or outbound handoffs.
+%% @returns `ok'.
+-spec handoff_disable(Direction :: inbound |
+ outbound) -> ok.
+
handoff_disable(inbound) ->
application:set_env(riak_core, disable_inbound_handoff,
true),
diff --git a/src/riak_core_handoff_receiver.erl b/src/riak_core_handoff_receiver.erl
index d6558297e..c74edb387 100644
--- a/src/riak_core_handoff_receiver.erl
+++ b/src/riak_core_handoff_receiver.erl
@@ -41,19 +41,40 @@
vnode :: pid() | undefined,
count = 0 :: non_neg_integer()}).
+-type state() :: #state{}.
+
%% set the TCP receive timeout to five minutes to be conservative.
-define(RECV_TIMEOUT, 300000).
%% set the timeout for the vnode to process the handoff_data msg to 60s
-define(VNODE_TIMEOUT, 60000).
+%% @doc Starts the receiver server.
+%% @see gen_server:start_link/3.
+-spec start_link() -> {ok, pid()} | ignore |
+ {error, {already_started, pid()} | term()}.
+
start_link() -> gen_server:start_link(?MODULE, [], []).
+%% @doc Set the socket to listen to.
+%% @param Pid Process ID of the receiver.
+%% @param Socket Socket to listen to.
+%% @returns `ok'.
+-spec set_socket(Pid :: pid(),
+ Socket :: inet:socket()) -> ok.
+
set_socket(Pid, Socket) ->
gen_server:call(Pid, {set_socket, Socket}).
+%% @doc Check if the receiver supports batching. Currently every receiver
+%% supports batching.
+-spec supports_batching() -> true.
+
supports_batching() -> true.
+%% @doc Callback for {@link gen_server:start_link/3}. Sets the default timeouts.
+-spec init([]) -> {ok, state()}.
+
init([]) ->
{ok,
#state{recv_timeout_len =
@@ -64,6 +85,11 @@ init([]) ->
handoff_receive_vnode_timeout,
?VNODE_TIMEOUT)}}.
+%% @doc Callback for {@link gen_server:call/3}.
+-spec handle_call(Msg :: {set_socket, inet:socket()},
+ From :: {pid(), term()}, State :: state()) -> {reply,
+ ok, state()}.
+
handle_call({set_socket, Socket0}, _From, State) ->
SockOpts = [{active, once}, {packet, 4}, {header, 1}],
ok = inet:setopts(Socket0, SockOpts),
@@ -71,6 +97,11 @@ handle_call({set_socket, Socket0}, _From, State) ->
Socket = Socket0,
{reply, ok, State#state{sock = Socket, peer = Peer}}.
+%% @doc Callback for {@link gen_server}. Handles tcp messages.
+-spec handle_info(Msg :: term(),
+ State :: state()) -> {stop, normal, state()} |
+ {noreply, state(), non_neg_integer()}.
+
handle_info({tcp_closed, _Socket},
State = #state{partition = Partition, count = Count,
peer = Peer}) ->
@@ -108,6 +139,16 @@ handle_info(timeout, State) ->
State#state.peer]),
{stop, normal, State}.
+%% @private
+%% @doc Process incoming tcp messages. If the message leads to an error,
+%% {@link erlang:exit/1} is called.
+%% @param MsgType Code for the type of message.
+%% @param MsgData Binary encoding of data.
+%% @param State Current state.
+%% @returns The updated state after the message has been processed.
+-spec process_message(MsgType :: integer(),
+ MsgData :: binary(), State :: state()) -> state().
+
process_message(?PT_MSG_INIT, MsgData,
State = #state{vnode_mod = VNodeMod, peer = Peer}) ->
<> = MsgData,
@@ -173,12 +214,33 @@ process_message(_, _MsgData,
<<(?PT_MSG_UNKNOWN):8, "unknown_msg">>),
State.
+%% @doc Callback for {@link gen_server:cast/2}. Not implemented.
+-spec handle_cast(Msg :: term(),
+ State :: state()) -> {noreply, state()}.
+
handle_cast(_Msg, State) -> {noreply, State}.
+%% @doc Callback for {@link gen_server:stop/1}. Not implemented.
+-spec terminate(Reason :: term(),
+ State :: state()) -> ok.
+
terminate(_Reason, _State) -> ok.
+%% @doc Callback for {@link gen_server}. Not implemented.
+-spec code_change(OldVsn :: term() | {down, term()},
+ State :: state(), Extra :: term()) -> {ok, state()}.
+
code_change(_OldVsn, State, _Extra) -> {ok, State}.
+%% @private
+%% @doc Transforms a socket to a safe peername via a module's callback function.
+%% @param Skt Socket to create the peername for.
+%% @param Module Module creating the peername.
+%% @returns Tuple containing the IP address as a string and port as an integer.
+-spec safe_peername(Skt :: inet:socket(),
+ Module :: module()) -> {string(), integer()} |
+ {unknown, unknown}.
+
safe_peername(Skt, Module) ->
case Module:peername(Skt) of
{ok, {Host, Port}} -> {inet_parse:ntoa(Host), Port};
diff --git a/src/riak_core_handoff_receiver_sup.erl b/src/riak_core_handoff_receiver_sup.erl
index 0a9402f10..9203de019 100644
--- a/src/riak_core_handoff_receiver_sup.erl
+++ b/src/riak_core_handoff_receiver_sup.erl
@@ -32,11 +32,29 @@
{I, {I, start_link, []}, temporary, brutal_kill, Type,
[I]}).
-%% begins the supervisor, init/1 will be called
+%% @doc Begin the supervisor, init/1 will be called
+%% @see supervisor:start_link/3.
+-spec start_link() -> {ok, pid()} |
+ {error,
+ {already_started, pid()} | {shutdown, term()} |
+ term()} |
+ ignore.
+
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% @private
+%% @doc Callback for {@link supervisor:start_link/3}. Starts the
+%% {@link riak_core_handoff_receiver} as its supervised child.
+%% @see riak_core_handoff_receiver:start_link/0.
+%% @returns Parameters to start the supervised child.
+-spec init([]) -> {ok,
+ {{simple_one_for_one, 10, 10},
+ [{riak_core_handoff_receiver,
+ {riak_core_handoff_receiver, start_link, []}, temporary,
+ brutal_kill, worker,
+ [riak_core_handoff_receiver]}, ...]}}.
+
init([]) ->
{ok,
{{simple_one_for_one, 10, 10},
diff --git a/src/riak_core_handoff_sender.erl b/src/riak_core_handoff_sender.erl
index 6da4f0a37..cac970454 100644
--- a/src/riak_core_handoff_sender.erl
+++ b/src/riak_core_handoff_sender.erl
@@ -70,10 +70,24 @@
type :: ho_type(), notsent_acc :: term(),
notsent_fun :: function() | undefined}).
+-type ho_acc() :: #ho_acc{}.
+
%%%===================================================================
%%% API
%%%===================================================================
+%% @doc Starts the handoff sender process and starts the handoff fold.
+%% @param TargetNode Node to send the handoff to.
+%% @param Module Module handling the handoff.
+%% @param Type Type of the handoff.
+%% @param Opts Handoff options.
+%% @param VNode Process ID of the vnode owning the handoff.
+%% @returns `{ok, Pid}' where `Pid' is the process ID of the handoff sender.
+-spec start_link(TargetNode :: node(),
+ Module :: module(),
+ {Type :: ho_type(), Opts :: [{atom(), term()}]},
+ VNode :: pid()) -> {ok, pid()}.
+
start_link(TargetNode, Module, {Type, Opts}, Vnode) ->
Pid = spawn_link(fun () ->
start_fold(TargetNode, Module, {Type, Opts}, Vnode)
@@ -84,6 +98,26 @@ start_link(TargetNode, Module, {Type, Opts}, Vnode) ->
%%% Private
%%%===================================================================
+%% @private
+%% @doc Start the handoff fold. First checks if the handoff is not aborted by a
+%% worker. After this the receiving node is verified. Then a fold request
+%% object is created and synchronously sent to the vnode handling the
+%% handoff.
+%% @param TargetNode Target of the handoff.
+%% @param Module Module handling the handoff.
+%% @param Type Handoff type.
+%% @param Opts Handoff options.
+%% @param ParentPid Process ID of the vnode owning the handoff.
+%% @param SrcNode Node handing off the data.
+%% @param SrcPartition Index of the partition to be handed off.
+%% @param TargetPartition Index of the partition to hand off data to.
+%% @returns `ok'.
+-spec start_fold_(TargetNode :: node(),
+ Module :: module(), Type :: ho_type(),
+ Opts :: [{atom(), term()}], ParentPid :: pid(),
+ SrcNode :: node(), SrcPartition :: index(),
+ TargetPartition :: index()) -> ok.
+
start_fold_(TargetNode, Module, Type, Opts, ParentPid,
SrcNode, SrcPartition, TargetPartition) ->
%% Give workers one more chance to abort or get a lock or whatever.
@@ -250,6 +284,20 @@ start_fold_(TargetNode, Module, Type, Opts, ParentPid,
end
end.
+%% @private
+%% @doc Start the handoff fold on the remote node.
+%% @param TargetNode Node to handoff to.
+%% @param Module Module handling handoff.
+%% @param Type Handoff type.
+%% @param Opts List of handoff options.
+%% @param ParentPid Process ID of the vnode handling the handoff.
+%% @returns `ok'.
+%% @see start_fold_/8.
+-spec start_fold(TargetNode :: node(),
+ Module :: module(),
+ {Type :: ho_type(), Opts :: [{atom(), term()}]},
+ ParentPid :: pid()) -> ok.
+
start_fold(TargetNode, Module, {Type, Opts},
ParentPid) ->
SrcNode = node(),
@@ -280,6 +328,14 @@ start_fold(TargetNode, Module, {Type, Opts},
riak_core_vnode:handoff_error(ParentPid, Err, Reason)
end.
+%% @private
+%% @doc Starts the handoff timer with the tick interval based on the receive
+%% timeout.
+%% @returns `{ok, TRef}' if the timer could be started, `{error, Reason}'
+%% otherwise.
+-spec start_visit_item_timer() -> {ok, timer:tref()} |
+ {error, term()}.
+
start_visit_item_timer() ->
Ival = case application:get_env(riak_core,
handoff_receive_timeout, undefined)
@@ -289,6 +345,17 @@ start_visit_item_timer() ->
end,
timer:send_interval(Ival, tick_send_sync).
+%% @private
+%% @doc Visit the given key-value item. Used as the fold-function in
+%% {@link start_fold_/8}.
+%% @param K Key.
+%% @param V Value.
+%% @param Acc0 Initial accumulator.
+%% @returns Handoff accumulator after the operation.
+%% @see visit_item2/3.
+-spec visit_item(K :: term(), V :: term(),
+ Acc0 :: ho_acc()) -> ho_acc().
+
visit_item(K, V,
Acc0 = #ho_acc{acksync_threshold = AccSyncThreshold}) ->
%% Eventually, a vnode worker proc will be doing this fold, but we don't
@@ -308,6 +375,18 @@ visit_item(K, V,
after 0 -> visit_item2(K, V, Acc)
end.
+%% @private
+%% @doc Visit the given key-value item and decide if it is to be sent in the
+%% handoff. If the current accumulator is marked with an error this is a
+%% no-op. If the ack-sync-threshold is reached try to sync acks. Otherwise
+%% prepare and send the handoff item.
+%% @param K Key.
+%% @param V Value.
+%% @param Acc Accumulator.
+%% @returns Accumulator after operation.
+-spec visit_item2(K :: term(), V :: term(),
+ Acc :: ho_acc()) -> ho_acc().
+
%% When a tcp error occurs, the ErrStatus argument is set to {error, Reason}.
%% Since we can't abort the fold, this clause is just a no-op.
visit_item2(_K, _V,
@@ -406,11 +485,32 @@ visit_item2(K, V, Acc) ->
notsent_acc = NewNotSentAcc}
end.
+%% @private
+%% @doc Handle an item that is not sent with the given callback function.
+%% @param NotSentFun Function to handle a key not sent. Can be `undefined'.
+%% @param Acc Current Handoff fold accumulator.
+%% @param Key Key of the item not sent.
+%% @returns New fold accumulator.
+-spec handle_not_sent_item(NotSentFun :: fun((term(),
+ ho_acc()) -> ho_acc()) |
+ undefined,
+ Acc :: ho_acc(), Key :: term()) -> undefined |
+ ho_acc().
+
handle_not_sent_item(undefined, _, _) -> undefined;
handle_not_sent_item(NotSentFun, Acc, Key)
when is_function(NotSentFun) ->
NotSentFun(Key, Acc).
+%% @private
+%% @doc Send a list of items to the socket specified in the handoff accumulator.
+%% @param ItemsReverseList List of items to send.
+%% @param Acc Handoff accumulator record containing information about the
+%% current handoff fold.
+%% @returns Handoff accumulator after the objects have been sent.
+-spec send_objects(ItemsReverseList :: [binary()],
+ Acc :: ho_acc()) -> ho_acc().
+
send_objects([], Acc) -> Acc;
send_objects(ItemsReverseList, Acc) ->
Items = lists:reverse(ItemsReverseList),
@@ -438,6 +538,13 @@ send_objects(ItemsReverseList, Acc) ->
Acc#ho_acc{error = {error, Reason}, stats = Stats3}
end.
+%% @private
+%% @doc Retrieve the ip of the handoff listener on the given node.
+%% @param Node Node to get the handoff IP for.
+%% @returns `{ok, IP}' where `IP' is the IP address as a string, or `error'.
+-spec get_handoff_ip(Node :: node()) -> {ok, string()} |
+ error.
+
get_handoff_ip(Node) when is_atom(Node) ->
case riak_core_util:safe_rpc(Node,
riak_core_handoff_listener, get_handoff_ip, [],
@@ -447,64 +554,96 @@ get_handoff_ip(Node) when is_atom(Node) ->
Res -> Res
end.
+%% @private
+%% @doc Retrieve the port of the handoff listener on the given node.
+%% @param Node Node to get the handoff port for.
+%% @returns `{ok, Port}' where `Port' is the port number as an integer.
+-spec get_handoff_port(Node :: node()) -> {ok,
+ integer()}.
+
get_handoff_port(Node) when is_atom(Node) ->
gen_server:call({riak_core_handoff_listener, Node},
handoff_port, infinity).
+%% @private
+%% @doc Get the timeout value set for receiving a handoff.
+%% @returns Timeout value.
+-spec get_handoff_receive_timeout() -> timeout().
+
get_handoff_receive_timeout() ->
application:get_env(riak_core, handoff_timeout,
?TCP_TIMEOUT).
+%% @private
+%% @doc Compute the time from the start time to now in seconds.
+%% @param StartFoldTime Timestamp of when the fold started.
+-spec end_fold_time(StartFoldTime ::
+ os:timestamp()) -> float().
+
end_fold_time(StartFoldTime) ->
EndFoldTime = os:timestamp(),
timer:now_diff(EndFoldTime, StartFoldTime) / 1000000.
%% @private
-%%
%% @doc Produce the value of `now/0' as if it were called `S' seconds
%% in the future.
--spec future_now(pos_integer()) -> erlang:timestamp().
+%% @param S Number of seconds in the future.
+%% @returns Timestamp `S' seconds in the future.
+-spec future_now(S ::
+ pos_integer()) -> erlang:timestamp().
future_now(S) ->
{Megas, Secs, Micros} = os:timestamp(),
{Megas, Secs + S, Micros}.
%% @private
-%%
%% @doc Check if the given timestamp `TS' has elapsed.
--spec is_elapsed(erlang:timestamp()) -> boolean().
+%% @param TS Timestamp possibly in the future.
+%% @returns `true' if the timestamp is in the past, `false' otherwise.
+-spec is_elapsed(TS :: erlang:timestamp()) -> boolean().
is_elapsed(TS) -> os:timestamp() >= TS.
%% @private
-%%
%% @doc Increment `Stats' byte count by `NumBytes'.
--spec incr_bytes(ho_stats(),
- non_neg_integer()) -> NewStats :: ho_stats().
+%% @param Stats Stats to change.
+%% @param NumBytes Number of bytes to add.
+%% @returns Updated stats.
+-spec incr_bytes(Stats :: ho_stats(),
+ NumBytes :: non_neg_integer()) -> NewStats ::
+ ho_stats().
incr_bytes(Stats = #ho_stats{bytes = Bytes},
NumBytes) ->
Stats#ho_stats{bytes = Bytes + NumBytes}.
+%% @private
+%% @doc Increment the object count by 1.
+%% @param Stats Stats to increment the object count on.
+%% @returns Updated stats.
+-spec incr_objs(Stats :: ho_stats()) -> ho_stats().
+
incr_objs(Stats) -> incr_objs(Stats, 1).
%% @private
-%%
-%% @doc Increment `Stats' object count by NObjs:
--spec incr_objs(ho_stats(),
- non_neg_integer()) -> NewStats :: ho_stats().
+%% @doc Increment `Stats' object count by NObjs.
+%% @param Stats Stats to increment the object count on.
+%% @param NObjs Number of objects to add.
+%% @returns Updated stats.
+-spec incr_objs(Stats :: ho_stats(),
+ NObjs :: non_neg_integer()) -> NewStats :: ho_stats().
incr_objs(Stats = #ho_stats{objs = Objs}, NObjs) ->
Stats#ho_stats{objs = Objs + NObjs}.
%% @private
-%%
%% @doc Check if the interval has elapsed and if so send handoff stats
%% for `ModSrcTgt' to the manager and return a new stats record
-%% `NetStats'.
--spec maybe_send_status({module(), non_neg_integer(),
- non_neg_integer()},
- ho_stats()) -> NewStats :: ho_stats().
+%% `NewStats'.
+%% @param ModSrcTgt Triple of module, source and target index.
+%% @param Stats Stats to send.
+-spec maybe_send_status(ModSrcTgt :: mod_src_tgt(),
+ Stats :: ho_stats()) -> NewStats :: ho_stats().
maybe_send_status(ModSrcTgt,
Stats = #ho_stats{interval_end = IntervalEnd}) ->
@@ -518,26 +657,58 @@ maybe_send_status(ModSrcTgt,
false -> Stats
end.
+%% @private
+%% @doc Get the currently set interval between status updates in seconds.
+-spec get_status_interval() -> integer().
+
get_status_interval() ->
application:get_env(riak_core, handoff_status_interval,
?STATUS_INTERVAL).
+%% @private
+%% @doc Get the index of the source partition of the handoff from options.
+%% @param Opts Handoff options.
+-spec get_src_partition(Opts :: [{atom(),
+ term()}]) -> index().
+
get_src_partition(Opts) ->
proplists:get_value(src_partition, Opts).
+%% @private
+%% @doc Get the index of the target partition of the handoff from options.
+%% @param Opts Handoff options.
+-spec get_target_partition(Opts :: [{atom(),
+ term()}]) -> index().
+
get_target_partition(Opts) ->
proplists:get_value(target_partition, Opts).
+%% @private
+%% @doc Get the initial accumulator of items not sent.
+%% @param Opts Options to retrieve the accumulator from.
+-spec get_notsent_acc0(Opts :: [{atom(),
+ term()}]) -> ho_acc().
+
get_notsent_acc0(Opts) ->
proplists:get_value(notsent_acc0, Opts).
+%% @private
+%% @doc Get the function to handle items not sent.
+%% @param Opts Options to retrieve the function from.
+-spec get_notsent_fun(Opts :: [{atom(),
+ term()}]) -> function().
+
get_notsent_fun(Opts) ->
case proplists:get_value(notsent_fun, Opts) of
none -> fun (_, _) -> undefined end;
Fun -> Fun
end.
--spec get_filter(proplists:proplist()) -> predicate().
+%% @private
+%% @doc Retrieve the filter predicate.
+%% @param Opts Options to retrieve the predicate from.
+-spec get_filter(Opts ::
+ proplists:proplist()) -> predicate().
get_filter(Opts) ->
case proplists:get_value(filter, Opts) of
@@ -546,9 +717,11 @@ get_filter(Opts) ->
end.
%% @private
-%%
-%% @doc check if the handoff reciever will accept batching messages
-%% otherwise fall back to the slower, object-at-a-time path
+%% @doc Check if the handoff receiver will accept batching messages
+%% otherwise fall back to the slower, object-at-a-time path.
+%% @param Node Node to check.
+-spec remote_supports_batching(Node ::
+ node()) -> boolean().
remote_supports_batching(Node) ->
case catch rpc:call(Node, riak_core_handoff_receiver,
@@ -573,6 +746,13 @@ remote_supports_batching(Node) ->
%% worker pid is passed so the vnode may use that information in its
%% decision to cancel the handoff or not e.g. get a lock on behalf of
%% the process.
+%% @param Module Module handling the handoff.
+%% @param SrcPartition Index of the source partition.
+%% @returns Options for the fold handoff.
+-spec maybe_call_handoff_started(Module :: module(),
+ SrcPartition ::
+ index()) -> proplists:proplist().
+
maybe_call_handoff_started(Module, SrcPartition) ->
case lists:member({handoff_started, 2},
Module:module_info(exports))
diff --git a/src/riak_core_handoff_sender_sup.erl b/src/riak_core_handoff_sender_sup.erl
index cbb503c0d..9334f59ec 100644
--- a/src/riak_core_handoff_sender_sup.erl
+++ b/src/riak_core_handoff_sender_sup.erl
@@ -38,6 +38,14 @@
%%% API
%%%===================================================================
+%% @doc Begin the supervisor, init/1 will be called
+%% @see supervisor:start_link/3.
+-spec start_link() -> {ok, pid()} |
+ {error,
+ {already_started, pid()} | {shutdown | reason} |
+ term()} |
+ ignore.
+
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
@@ -67,6 +75,17 @@ start_sender(Type, Module, TargetNode, VNode, Opts) ->
%%%===================================================================
%% @private
+%% @doc Callback for {@link supervisor:start_link/3}. Starts the
+%% {@link riak_core_handoff_sender} as its supervised child.
+%% @see riak_core_handoff_sender:start_link/0.
+%% @returns Parameters to start the supervised child.
+-spec init([]) -> {ok,
+ {{simple_one_for_one, 10, 10},
+ [{riak_core_handoff_sender,
+ {riak_core_handoff_sender, start_link, []}, temporary,
+ brutal_kill, worker,
+ [riak_core_handoff_sender]}, ...]}}.
+
init([]) ->
{ok,
{{simple_one_for_one, 10, 10},
diff --git a/src/riak_core_handoff_sup.erl b/src/riak_core_handoff_sup.erl
index 518f89eaf..a458c2999 100644
--- a/src/riak_core_handoff_sup.erl
+++ b/src/riak_core_handoff_sup.erl
@@ -29,11 +29,42 @@
{I, {I, start_link, []}, permanent, brutal_kill, Type,
[I]}).
-%% begins the supervisor, init/1 will be called
+%% @doc Begin the supervisor, init/1 will be called
+%% @see supervisor:start_link/3.
+-spec start_link() -> {ok, pid()} |
+ {error,
+ {already_started, pid()} | {shutdown | reason} |
+ term()} |
+ ignore.
+
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% @private
+%% @doc Callback for {@link supervisor:start_link/3}. Starts the
+%% {@link riak_core_handoff_receiver_sup},
+%% {@link riak_core_handoff_sender_sup},
+%% {@link riak_core_handoff_listener_sup}, and
+%% {@link riak_core_handoff_manager} as its supervised children.
+%% @see riak_core_handoff_sender:start_link/0.
+%% @returns Parameters to start the supervised children.
+-spec init([]) -> {ok,
+ {{one_for_all, 10, 10},
+ [{riak_core_handoff_receiver_sup |
+ riak_core_handoff_sender_sup |
+ riak_core_handoff_listener_sup |
+ riak_core_handoff_manager,
+ {riak_core_handoff_receiver_sup |
+ riak_core_handoff_sender_sup |
+ riak_core_handoff_listener_sup |
+ riak_core_handoff_manager,
+ start_link, []},
+ permanent, brutal_kill, supervisor | worker,
+ [riak_core_handoff_receiver_sup |
+ riak_core_handoff_sender_sup |
+ riak_core_handoff_listener_sup |
+ riak_core_handoff_manager]}]}}.
+
init([]) ->
{ok,
{{one_for_all, 10, 10},
diff --git a/src/riak_core_ring.erl b/src/riak_core_ring.erl
index eaecd191a..feafec328 100644
--- a/src/riak_core_ring.erl
+++ b/src/riak_core_ring.erl
@@ -188,15 +188,25 @@ set_chash(State, CHash) ->
all_members(#chstate{members = Members}) ->
get_members(Members).
+%% @doc Produce a list of all nodes in the cluster with the given types
+-spec members(State :: chstate(),
+ Types :: [member_status()]) -> [Node :: term()].
+
members(#chstate{members = Members}, Types) ->
get_members(Members, Types).
%% @doc Produce a list of all active (not marked as down) cluster members
+-spec active_members(State :: chstate()) -> [Node ::
+ term()].
+
active_members(#chstate{members = Members}) ->
get_members(Members,
[joining, valid, leaving, exiting]).
%% @doc Returns a list of members guaranteed safe for requests
+-spec ready_members(State :: chstate()) -> [Node ::
+ term()].
+
ready_members(#chstate{members = Members}) ->
get_members(Members, [valid, leaving]).
@@ -381,6 +391,9 @@ random_other_index(State) ->
_ -> lists:nth(rand:uniform(length(L)), L)
end.
+%% @doc Return a partition index not owned by the node executing this function
+%% or contained in the exclude list.
+%% If there is no feasible index, return no_indices.
-spec random_other_index(State :: chstate(),
Exclude :: [term()]) -> chash:index_as_int() |
no_indices.
@@ -546,6 +559,12 @@ future_index(CHashKey, OrigIdx, NValCheck, OrigCount,
(NextOwner + NextInc * OrigDist) rem RingTop
end.
+%% @doc Check if the index is either out of bounds of the ring size or the n
+%% value
+-spec check_invalid_future_index(non_neg_integer(),
+ pos_integer(),
+ integer() | undefined) -> boolean().
+
check_invalid_future_index(OrigDist, NextCount,
NValCheck) ->
OverRingSize = OrigDist >= NextCount,
@@ -616,6 +635,10 @@ remove_meta(Key, State) ->
claimant(#chstate{claimant = Claimant}) -> Claimant.
+%% @doc Set the new claimant.
+-spec set_claimant(State :: chstate(),
+ Claimant :: node()) -> NState :: chstate().
+
set_claimant(State, Claimant) ->
State#chstate{claimant = Claimant}.
@@ -625,9 +648,17 @@ set_claimant(State, Claimant) ->
cluster_name(State) -> State#chstate.clustername.
%% @doc Sets the unique identifer for this cluster.
+-spec set_cluster_name(State :: chstate(),
+ Name :: {term(), term()}) -> chstate().
+
set_cluster_name(State, Name) ->
State#chstate{clustername = Name}.
+%% @doc Mark the cluster names as undefined if at least one is undefined.
+%% Else leave the names unchanged.
+-spec reconcile_names(RingA :: chstate(),
+ RingB :: chstate()) -> {chstate(), chstate()}.
+
reconcile_names(RingA = #chstate{clustername = NameA},
RingB = #chstate{clustername = NameB}) ->
case (NameA =:= undefined) or (NameB =:= undefined) of
@@ -637,12 +668,24 @@ reconcile_names(RingA = #chstate{clustername = NameA},
false -> {RingA, RingB}
end.
+%% @doc Increment the vector clock and return the new state.
+-spec increment_vclock(Node :: node(),
+ State :: chstate()) -> chstate().
+
increment_vclock(Node, State) ->
VClock = vclock:increment(Node, State#chstate.vclock),
State#chstate{vclock = VClock}.
+%% @doc Return the current ring version.
+-spec ring_version(chstate()) -> vclock:vclock() |
+ undefined.
+
ring_version(#chstate{rvsn = RVsn}) -> RVsn.
+%% @doc Increment the ring version and return the new state.
+-spec increment_ring_version(node(),
+ chstate()) -> chstate().
+
increment_ring_version(Node, State) ->
RVsn = vclock:increment(Node, State#chstate.rvsn),
State#chstate{rvsn = RVsn}.
@@ -668,6 +711,11 @@ all_member_status(#chstate{members = Members}) ->
|| {Node, {Status, _VC, _}} <- Members,
Status /= invalid].
+%% @doc Return the member's meta value for the given key or undefined if the
+%% member or key cannot be found.
+-spec get_member_meta(chstate(), node(),
+ atom()) -> term() | undefined.
+
get_member_meta(State, Member, Key) ->
case orddict:find(Member, State#chstate.members) of
error -> undefined;
@@ -679,12 +727,19 @@ get_member_meta(State, Member, Key) ->
end.
%% @doc Set a key in the member metadata orddict
+-spec update_member_meta(node(), chstate(), node(),
+ atom(), term()) -> chstate().
+
update_member_meta(Node, State, Member, Key, Val) ->
VClock = vclock:increment(Node, State#chstate.vclock),
State2 = update_member_meta(Node, State, Member, Key,
Val, same_vclock),
State2#chstate{vclock = VClock}.
+%% @see update_member_meta/5.
+-spec update_member_meta(node(), chstate(), node(),
+ atom(), term(), same_vclock) -> chstate().
+
update_member_meta(Node, State, Member, Key, Val,
same_vclock) ->
Members = State#chstate.members,
@@ -700,6 +755,10 @@ update_member_meta(Node, State, Member, Key, Val,
false -> State
end.
+%% @doc Remove the meta entries for the given member.
+-spec clear_member_meta(node(), chstate(),
+ node()) -> chstate().
+
clear_member_meta(Node, State, Member) ->
Members = State#chstate.members,
case orddict:is_key(Member, Members) of
@@ -714,22 +773,46 @@ clear_member_meta(Node, State, Member) ->
false -> State
end.
+%% @doc Mark a member as joining
+-spec add_member(node(), chstate(),
+ node()) -> chstate().
+
add_member(PNode, State, Node) ->
set_member(PNode, State, Node, joining).
+%% @doc Mark a member as invalid
+-spec remove_member(node(), chstate(),
+ node()) -> chstate().
+
remove_member(PNode, State, Node) ->
State2 = clear_member_meta(PNode, State, Node),
set_member(PNode, State2, Node, invalid).
+%% @doc Mark a member as leaving
+-spec leave_member(node(), chstate(),
+ node()) -> chstate().
+
leave_member(PNode, State, Node) ->
set_member(PNode, State, Node, leaving).
+%% @doc Mark a member as exiting
+-spec exit_member(node(), chstate(),
+ node()) -> chstate().
+
exit_member(PNode, State, Node) ->
set_member(PNode, State, Node, exiting).
+%% @doc Mark a member as down
+-spec down_member(node(), chstate(),
+ node()) -> chstate().
+
down_member(PNode, State, Node) ->
set_member(PNode, State, Node, down).
+%% @doc Mark a member with the given status
+-spec set_member(node(), chstate(), node(),
+ member_status()) -> chstate().
+
set_member(Node, CState, Member, Status) ->
VClock = vclock:increment(Node, CState#chstate.vclock),
CState2 = set_member(Node, CState, Member, Status,
@@ -784,6 +867,8 @@ indices(State, Node) ->
future_indices(State, Node) ->
indices(future_ring(State), Node).
+%% @doc Return all node entries that will exist after the pending changes are
+%% applied.
-spec all_next_owners(chstate()) -> [{integer(),
term()}].
@@ -792,6 +877,10 @@ all_next_owners(CState) ->
[{Idx, NextOwner} || {Idx, _, NextOwner, _, _} <- Next].
%% @private
+%% Change the owner of the indices to the new owners.
+-spec change_owners(chstate(),
+ [{integer(), node()}]) -> chstate().
+
change_owners(CState, Reassign) ->
lists:foldl(fun ({Idx, NewOwner}, CState0) ->
%% if called for indexes not in the current ring (during resizing)
@@ -804,6 +893,9 @@ change_owners(CState, Reassign) ->
CState, Reassign).
%% @doc Return all indices that a node is scheduled to give to another.
+-spec disowning_indices(chstate(),
+ node()) -> [integer()].
+
disowning_indices(State, Node) ->
case is_resizing(State) of
false ->
@@ -817,6 +909,10 @@ disowning_indices(State, Node) ->
disowned_during_resize(State, Idx, Owner)]
end.
+%% @doc Check if the owner of the index changes during resize.
+-spec disowned_during_resize(chstate(), integer(),
+ node()) -> boolean().
+
disowned_during_resize(CState, Idx, Owner) ->
%% catch error when index doesn't exist, we are disowning it if its going away
NextOwner = try future_owner(CState, Idx) catch
@@ -828,10 +924,18 @@ disowned_during_resize(CState, Idx, Owner) ->
end.
%% @doc Returns a list of all pending ownership transfers.
+-spec pending_changes(chstate()) -> [{integer(), term(),
+ term(), [module()], awaiting | complete}].
+
pending_changes(State) ->
%% For now, just return next directly.
State#chstate.next.
+%% @doc Set the transfers as pending changes
+-spec set_pending_changes(chstate(),
+ [{integer(), term(), term(), [module()],
+ awaiting | complete}]) -> chstate().
+
set_pending_changes(State, Transfers) ->
State#chstate{next = Transfers}.
@@ -872,6 +976,8 @@ set_pending_resize(Resizing, Orig) ->
SortedNext),
FutureCHash).
+%% @doc Abort the resizing procedure if possible and return true on a successful
+%% abort.
-spec maybe_abort_resize(chstate()) -> {boolean(),
chstate()}.
@@ -890,11 +996,13 @@ maybe_abort_resize(State) ->
false -> {false, State}
end.
+%% @doc Set the resize abort value to true.
-spec set_pending_resize_abort(chstate()) -> chstate().
set_pending_resize_abort(State) ->
update_meta('$resized_ring_abort', true, State).
+%% @doc Add the transfer from source to target to the scheduled transfers.
-spec schedule_resize_transfer(chstate(),
{integer(), term()},
integer() | {integer(), term()}) -> chstate().
@@ -934,6 +1042,10 @@ reschedule_resize_transfers(State = #chstate{next =
State, Next),
NewState#chstate{next = NewNext}.
+%% @doc Reset the status of a resize operation
+-spec reschedule_resize_operation(pos_integer(), node(),
+ term(), chstate()) -> {term(), chstate()}.
+
reschedule_resize_operation(N, NewNode,
{Idx, N, '$resize', _Mods, _Status}, State) ->
NewEntry = {Idx, NewNode, '$resize', ordsets:new(),
@@ -955,6 +1067,12 @@ reschedule_resize_operation(Node, NewNode,
false -> {Entry, State}
end.
+%% @see reschedule_resize_operation/4.
+-spec reschedule_inbound_resize_transfers({integer(),
+ term()},
+ node(), node(),
+ chstate()) -> {boolean(), chstate()}.
+
reschedule_inbound_resize_transfers(Source, Node,
NewNode, State) ->
F = fun (Transfer, Acc) ->
diff --git a/src/riak_core_ring_manager.erl b/src/riak_core_ring_manager.erl
index 24a4893e7..46718dc5e 100644
--- a/src/riak_core_ring_manager.erl
+++ b/src/riak_core_ring_manager.erl
@@ -61,6 +61,8 @@
-behaviour(gen_server).
+-type ring() :: riak_core_ring:riak_core_ring().
+
-export([start_link/0, start_link/1, get_my_ring/0,
get_raw_ring/0, get_raw_ring_chashbin/0,
get_chash_bin/0, get_ring_id/0, refresh_my_ring/0,
@@ -128,6 +130,9 @@ get_my_ring() ->
undefined -> {error, no_ring}
end.
+%% @doc Retrieve the ring currently stored on this local node.
+-spec get_raw_ring() -> {ok, ring()}.
+
get_raw_ring() ->
try Ring = ets:lookup_element(?ETS, raw_ring, 2),
{ok, Ring}
diff --git a/src/riak_core_vnode.erl b/src/riak_core_vnode.erl
index 72413bd05..917fffffc 100644
--- a/src/riak_core_vnode.erl
+++ b/src/riak_core_vnode.erl
@@ -323,10 +323,15 @@ send_all_proxy_req(VNode, Req) ->
gen_fsm_compat:send_all_state_event(VNode, Req).
%% #16 - riak:core_handoff_sender - start_fold_
+-spec handoff_complete(VNode :: pid()) -> ok.
+
handoff_complete(VNode) ->
gen_fsm_compat:send_event(VNode, handoff_complete).
%% #17 - riak:core_handoff_sender - start_fold_
+-spec resize_transfer_complete(VNode :: pid(),
+ NotSentAcc :: term()) -> ok.
+
resize_transfer_complete(VNode, NotSentAcc) ->
gen_fsm_compat:send_event(VNode,
{resize_transfer_complete, NotSentAcc}).