From bcded307f4c454b9e83989cb2d174de21a002a21 Mon Sep 17 00:00:00 2001 From: "Gyeong-Rok.Lee" Date: Fri, 11 Jan 2019 16:18:53 +0900 Subject: [PATCH 01/23] [LC-116] Fix bug to reconnect grpc connection very fast continuously Signed-off-by: Gyeong-Rok.Lee --- loopchain/baseservice/broadcast_scheduler.py | 53 ++++++++++++-------- loopchain/baseservice/stub_manager.py | 20 +++++++- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/loopchain/baseservice/broadcast_scheduler.py b/loopchain/baseservice/broadcast_scheduler.py index 7eae41465..4f151a974 100644 --- a/loopchain/baseservice/broadcast_scheduler.py +++ b/loopchain/baseservice/broadcast_scheduler.py @@ -142,7 +142,12 @@ def schedule_broadcast(self, method_name, method_param, *, retry_times=None, tim self.schedule_job(BroadcastCommand.BROADCAST, (method_name, method_param, kwargs)) - def __broadcast_retry_async(self, peer_target, method_name, method_param, retry_times, timeout, result): + def __keep_grpc_connection(self, result, timeout, stub_manager: StubManager): + return isinstance(result, _Rendezvous) \ + and result.code() in (grpc.StatusCode.DEADLINE_EXCEEDED, grpc.StatusCode.UNAVAILABLE) \ + and stub_manager.elapsed_last_succeed_time() < timeout + + def __broadcast_retry_async(self, peer_target, method_name, method_param, retry_times, timeout, stub, result): if isinstance(result, _Rendezvous) and result.code() == grpc.StatusCode.OK: return if isinstance(result, futures.Future) and not result.exception(): @@ -150,8 +155,16 @@ def __broadcast_retry_async(self, peer_target, method_name, method_param, retry_ logging.debug(f"try retry to : peer_target({peer_target})\n") if retry_times > 0: - retry_times -= 1 - self.__call_async_to_target(peer_target, method_name, method_param, False, retry_times, timeout) + try: + stub_manager: StubManager = self.__audience[peer_target] + if not stub_manager: + logging.warning(f"broadcast_thread:__broadcast_retry_async Failed to connect to ({peer_target}).") + return + retry_times -= 1 + is_stub_reuse = stub_manager.stub != stub or self.__keep_grpc_connection(result, timeout, stub_manager) + self.__call_async_to_target(peer_target, method_name, method_param, is_stub_reuse, retry_times, timeout) + except KeyError as e: + logging.debug(f"broadcast_thread:__broadcast_retry_async ({peer_target}) not in audience. ({e})") else: if isinstance(result, _Rendezvous): exception = result.details() @@ -167,26 +180,24 @@ def __broadcast_retry_async(self, peer_target, method_name, method_param, retry_ def __call_async_to_target(self, peer_target, method_name, method_param, is_stub_reuse, retry_times, timeout): try: - call_back_partial = None - stub_item = None - - if peer_target in self.__audience.keys(): - call_back_partial = partial(self.__broadcast_retry_async, - peer_target, - method_name, - method_param, - retry_times, - timeout) - stub_item = self.__audience[peer_target] + stub_item: StubManager = self.__audience[peer_target] + if not stub_item: + logging.debug(f"broadcast_thread:__call_async_to_target Failed to connect to ({peer_target}).") + return + call_back_partial = partial(self.__broadcast_retry_async, + peer_target, + method_name, + method_param, + retry_times, + timeout, + stub_item.stub) + stub_item.call_async(method_name=method_name, + message=method_param, + is_stub_reuse=is_stub_reuse, + call_back=call_back_partial, + timeout=timeout) except KeyError as e: logging.debug(f"broadcast_thread:__call_async_to_target ({peer_target}) not in audience. 
({e})") - else: - if stub_item: - stub_item.call_async(method_name=method_name, - message=method_param, - is_stub_reuse=is_stub_reuse, - call_back=call_back_partial, - timeout=timeout) def __broadcast_run_async(self, method_name, method_param, retry_times=None, timeout=None): """call gRPC interface of audience diff --git a/loopchain/baseservice/stub_manager.py b/loopchain/baseservice/stub_manager.py index 25ab36ae2..cfde6b9f2 100644 --- a/loopchain/baseservice/stub_manager.py +++ b/loopchain/baseservice/stub_manager.py @@ -36,6 +36,7 @@ def __init__(self, target, stub_type, ssl_auth_type=conf.SSLAuthType.none): self.__stub = None self.__channel = None self.__stub_update_time = datetime.datetime.now() + self.__last_succeed_time = time.clock_gettime(time.CLOCK_MONOTONIC) self.__make_stub(False) @@ -47,6 +48,8 @@ def __make_stub(self, is_stub_reuse=True): self.__stub, self.__channel = util.get_stub_to_server( self.__target, self.__stub_type, is_check_status=False, ssl_auth_type=self.__ssl_auth_type) self.__stub_update_time = datetime.datetime.now() + if self.__stub: + self.__update_last_succeed_time() else: pass @@ -64,6 +67,12 @@ def stub(self, value): def target(self): return self.__target + def elapsed_last_succeed_time(self): + return time.clock_gettime(time.CLOCK_MONOTONIC) - self.__last_succeed_time + + def __update_last_succeed_time(self): + self.__last_succeed_time = time.clock_gettime(time.CLOCK_MONOTONIC) + def call(self, method_name, message, timeout=None, is_stub_reuse=True, is_raise=False): if timeout is None: timeout = conf.GRPC_TIMEOUT @@ -71,7 +80,9 @@ def call(self, method_name, message, timeout=None, is_stub_reuse=True, is_raise= try: stub_method = getattr(self.__stub, method_name) - return stub_method(message, timeout) + ret = stub_method(message, timeout) + self.__update_last_succeed_time() + return ret except Exception as e: logging.warning(f"gRPC call fail method_name({method_name}), message({message}): {e}") if is_raise: @@ -92,10 +103,15 @@ def call_async(self, method_name, message, call_back=None, timeout=None, is_stub call_back = self.print_broadcast_fail self.__make_stub(is_stub_reuse) + def done_callback(result: _Rendezvous): + if result.code() == grpc.StatusCode.OK: + self.__update_last_succeed_time() + call_back(result) + try: stub_method = getattr(self.__stub, method_name) feature_future = stub_method.future(message, timeout) - feature_future.add_done_callback(call_back) + feature_future.add_done_callback(done_callback) return feature_future except Exception as e: logging.warning(f"gRPC call_async fail method_name({method_name}), message({message}): {e}, " From 0a8ea84d3392d1ca993eff5be5ce69e1c521296d Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Fri, 18 Jan 2019 17:59:12 +0900 Subject: [PATCH 02/23] [LC-126] Do refactoring IcxVerifier and IcxAuthorization. 
--- loopchain/channel/channel_service.py | 11 +- loopchain/consensus/vote_message.py | 2 +- loopchain/peer/consensus_siever.py | 2 +- loopchain/peer/icx_authorization.py | 185 ++++++++++----- loopchain/peer/peer_authorization.py | 233 ------------------- loopchain/peer/peer_service.py | 5 +- loopchain/tools/signature_helper.py | 327 --------------------------- loopchain/utils/__init__.py | 3 - testcase/unittest/test_util.py | 10 +- 9 files changed, 136 insertions(+), 642 deletions(-) delete mode 100644 loopchain/peer/peer_authorization.py delete mode 100644 loopchain/tools/signature_helper.py diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 19bcae4f4..df6062f42 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -33,8 +33,7 @@ from loopchain.channel.channel_statemachine import ChannelStateMachine from loopchain.consensus import Consensus, Acceptor, Proposer from loopchain.peer import BlockManager -from loopchain.peer.icx_authorization import IcxAuthorization -from loopchain.peer.peer_authorization import PeerAuthorization +from loopchain.peer.icx_authorization import Signer from loopchain.protos import loopchain_pb2_grpc, message_code, loopchain_pb2 from loopchain.utils import loggers, command_arguments from loopchain.utils.icon_service import convert_params, ParamType, response_to_json_query @@ -46,7 +45,7 @@ def __init__(self, channel_name, amqp_target, amqp_key): self.__block_manager: BlockManager = None self.__score_container: CommonSubprocess = None self.__score_info: dict = None - self.__peer_auth: IcxAuthorization = None + self.__peer_auth: Signer = None self.__peer_manager: PeerManager = None self.__broadcast_scheduler: BroadcastScheduler = None self.__radio_station_stub = None @@ -252,8 +251,7 @@ async def subscribe_network(self): def __init_peer_auth(self): try: - self.__peer_auth = IcxAuthorization(ChannelProperty().name) - + self.__peer_auth = Signer.from_channel(ChannelProperty().name) except Exception as e: logging.exception(f"peer auth init fail cause : {e}") util.exit_and_msg(f"peer auth init fail cause : {e}") @@ -417,8 +415,7 @@ def connect_to_radio_station(self, is_reconnect=False): peer_object=b'', peer_id=ChannelProperty().peer_id, peer_target=ChannelProperty().peer_target, - group_id=ChannelProperty().group_id, - cert=self.peer_auth.peer_cert), + group_id=ChannelProperty().group_id), retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS, is_stub_reuse=True, timeout=conf.CONNECTION_TIMEOUT_TO_RS) diff --git a/loopchain/consensus/vote_message.py b/loopchain/consensus/vote_message.py index 494470edd..542b61637 100644 --- a/loopchain/consensus/vote_message.py +++ b/loopchain/consensus/vote_message.py @@ -142,7 +142,7 @@ def loads(self, dumps: str): def sign(self, peer_auth): vote_hash = self.__get_vote_hash(json_data=self.get_vote_to_json(), need_sign=False) - self.__signature = peer_auth.sign_data(vote_hash, is_hash=True) + self.__signature = peer_auth.sign(vote_hash, is_hash=True) def get_vote_data(self): vote_json_data = self.get_vote_to_json() diff --git a/loopchain/peer/consensus_siever.py b/loopchain/peer/consensus_siever.py index 232f7f538..9effeb53d 100644 --- a/loopchain/peer/consensus_siever.py +++ b/loopchain/peer/consensus_siever.py @@ -89,7 +89,7 @@ async def consensus(self): block_builder.height = last_block.header.height + 1 block_builder.prev_hash = last_block.header.hash block_builder.next_leader = next_leader - block_builder.peer_private_key = 
ObjectManager().channel_service.peer_auth.peer_private_key + block_builder.private_key = ObjectManager().channel_service.peer_auth.private_key block_builder.confirm_prev_block = vote_result or (self._made_block_count > 0) candidate_block = block_builder.build() diff --git a/loopchain/peer/icx_authorization.py b/loopchain/peer/icx_authorization.py index 3baadd29d..973e9d86d 100644 --- a/loopchain/peer/icx_authorization.py +++ b/loopchain/peer/icx_authorization.py @@ -19,61 +19,105 @@ from asn1crypto import keys from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization -from secp256k1 import PrivateKey +from secp256k1 import PrivateKey, PublicKey -import loopchain.utils as util -from loopchain import configure as conf -from loopchain.tools.signature_helper import PublicVerifier, IcxVerifier +class SignVerifier: + _pri = PrivateKey() -def long_to_bytes (val, endianness='big'): - """ - Use :ref:`string formatting` and :func:`~binascii.unhexlify` to - convert ``val``, a :func:`long`, to a byte :func:`str`. + def __init__(self): + self.address: str = None - :param long val: The value to pack + def verify_data(self, origin_data: bytes, signature: bytes): + return self.verify_signature(origin_data, signature, False) - :param str endianness: The endianness of the result. ``'big'`` for - big-endian, ``'little'`` for little-endian. - - If you want byte- and word-ordering to differ, you're on your own. + def verify_hash(self, origin_data, signature): + return self.verify_signature(origin_data, signature, True) - Using :ref:`string formatting` lets us use Python's C innards. - """ + def verify_address(self, pubkey: bytes): + return self.address_from_pubkey(pubkey) != self.address - # one (1) hex digit per four (4) bits - width = val.bit_length() - - # unhexlify wants an even multiple of eight (8) bits, but we don't - # want more digits than we need (hence the ternary-ish 'or') - width += 8 - ((width % 8) or 8) - - # format width specifier: four (4) bits per hex digit - fmt = '%%0%dx' % (width // 4) + def verify_signature(self, origin_data: bytes, signature: bytes, is_hash): + try: + if is_hash: + origin_data = binascii.unhexlify(origin_data) + origin_signature, recover_code = signature[:-1], signature[-1] + recoverable_sig = self._pri.ecdsa_recoverable_deserialize(origin_signature, recover_code) + pub = self._pri.ecdsa_recover(origin_data, + recover_sig=recoverable_sig, + raw=is_hash, + digest=hashlib.sha3_256) + extract_pub = PublicKey(pub).serialize(compressed=False) + return self.verify_address(extract_pub) + except Exception: + logging.debug(f"signature verify fail : {origin_data} {signature}") + return False + + @classmethod + def from_address(cls, address: str): + verifier = SignVerifier() + verifier.address = address + return verifier + + @classmethod + def from_pubkey(cls, pubkey: bytes): + address = cls.address_from_pubkey(pubkey) + return cls.from_address(address) + + @classmethod + def from_prikey(cls, prikey: bytes): + address = cls.address_from_prikey(prikey) + return cls.from_address(address) + + @classmethod + def address_from_pubkey(cls, pubkey: bytes): + hash_pub = hashlib.sha3_256(pubkey[1:]).hexdigest() + return f"hx{hash_pub[-40:]}" + + @classmethod + def address_from_prikey(cls, prikey: bytes): + pubkey = PrivateKey(prikey).pubkey.serialize(compressed=False) + return cls.address_from_pubkey(pubkey) + + +class Signer(SignVerifier): + def __init__(self): + super().__init__() + self.private_key: PrivateKey = None - # prepend zero 
(0) to the width, to zero-pad the output - s = binascii.unhexlify(fmt % val) + def sign(self, data, is_hash=False): + if is_hash: + if isinstance(data, str): + try: + data = data.split("0x")[1] if data.startswith("0x") else data + data = binascii.unhexlify(data) + except Exception as e: + logging.error(f"hash data must hex string or bytes \n exception : {e}") + return None - if endianness == 'little': - # see http://stackoverflow.com/a/931095/309233 - s = s[::-1] + if not isinstance(data, (bytes, bytearray)): + logging.error(f"data must be bytes \n") + return None - return s + signature = self.private_key.ecdsa_sign_recoverable(msg=data, + raw=is_hash, + digest=hashlib.sha3_256) + serialized_sig = self._pri.ecdsa_recoverable_serialize(signature) + return b''.join([serialized_sig[0], bytes([serialized_sig[1]])]) + @classmethod + def from_channel(cls, channel: str): + from loopchain import configure as conf -class IcxAuthorization(IcxVerifier): - def __init__(self, channel): - super().__init__() - self.__channel = channel - with open(conf.CHANNEL_OPTION[self.__channel][PublicVerifier.PRIVATE_PATH], "rb") as der: + with open(conf.CHANNEL_OPTION[channel]["private_path"], "rb") as der: private_bytes = der.read() - private_pass = conf.CHANNEL_OPTION[self.__channel][PublicVerifier.PRIVATE_PASSWORD] + private_pass = conf.CHANNEL_OPTION[channel]["private_password"] if isinstance(private_pass, str): private_pass = private_pass.encode() try: try: - temp_private = serialization\ + temp_private = serialization \ .load_der_private_key(private_bytes, private_pass, default_backend()) @@ -92,35 +136,52 @@ def __init__(self, channel): encryption_algorithm=serialization.NoEncryption() ) key_info = keys.PrivateKeyInfo.load(no_pass_private) + prikey = long_to_bytes(key_info['private_key'].native['private_key']) + return cls.from_prikey(prikey) + + @classmethod + def from_prikey(cls, prikey: bytes): + auth = Signer() + auth.private_key = PrivateKey(prikey) + auth.address = cls.address_from_prikey(prikey) + + # verify + sign = auth.sign(b'TEST') + if auth.verify_data(b'TEST', sign) is False: + raise ValueError("Invalid Signature(Peer Certificate load test)") + return auth - self.__peer_pri = PrivateKey(long_to_bytes(key_info['private_key'].native['private_key'])) - self._init_using_pub(self.__peer_pri.pubkey.serialize(compressed=False)) - # 키 쌍 검증 - sign = self.sign_data(b'TEST') - if self.verify_data(b'TEST', sign) is False: - raise ValueError("Invalid Signature(Peer Certificate load test)") +def long_to_bytes (val, endianness='big'): + """ + Use :ref:`string formatting` and :func:`~binascii.unhexlify` to + convert ``val``, a :func:`long`, to a byte :func:`str`. - @property - def peer_private_key(self): - return self.__peer_pri + :param long val: The value to pack - def sign_data(self, data, is_hash=False): - if is_hash: - if isinstance(data, str): - try: - data = binascii.unhexlify(util.trim_hex(data)) - except Exception as e: - logging.error(f"hash data must hex string or bytes \n exception : {e}") - return None + :param str endianness: The endianness of the result. ``'big'`` for + big-endian, ``'little'`` for little-endian. - if not isinstance(data, (bytes, bytearray)): - logging.error(f"data must be bytes \n") - return None + If you want byte- and word-ordering to differ, you're on your own. 
- signature = self.__peer_pri.ecdsa_sign_recoverable(msg=data, - raw=is_hash, - digest=hashlib.sha3_256) - serialized_sig = self._pri.ecdsa_recoverable_serialize(signature) + Using :ref:`string formatting` lets us use Python's C innards. + """ - return b''.join([serialized_sig[0], bytes([serialized_sig[1]])]) + # one (1) hex digit per four (4) bits + width = val.bit_length() + + # unhexlify wants an even multiple of eight (8) bits, but we don't + # want more digits than we need (hence the ternary-ish 'or') + width += 8 - ((width % 8) or 8) + + # format width specifier: four (4) bits per hex digit + fmt = '%%0%dx' % (width // 4) + + # prepend zero (0) to the width, to zero-pad the output + s = binascii.unhexlify(fmt % val) + + if endianness == 'little': + # see http://stackoverflow.com/a/931095/309233 + s = s[::-1] + + return s diff --git a/loopchain/peer/peer_authorization.py b/loopchain/peer/peer_authorization.py deleted file mode 100644 index edb079183..000000000 --- a/loopchain/peer/peer_authorization.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2018 ICON Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" A class for authorization of Peer """ - -import binascii -import datetime -import logging - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.primitives.asymmetric import rsa, padding, ec, utils -from cryptography.x509 import Certificate - -import loopchain.utils as util -from loopchain import configure as conf -from loopchain.configure_default import KeyLoadType -from loopchain.tools.signature_helper import PublicVerifier - - -class PeerAuthorization(PublicVerifier): - """Peer의 인증을 처리한다""" - __ca_cert = None - __token = None - - # RequestPeer 요청 생성 시 저장 정보 - __peer_info = None - - def __init__(self, channel, rand_table=None): - """create key_pair for signature using conf.CHANNEL_OPTION - - :param channel: channel name - :param rand_table: for RandomTable Derivation key set, create using table - :param agent_pin: for KMS, kms connection agent pin - """ - - super().__init__(channel) - self.__peer_pri = None - - # option check - if not self._channel_option[self.LOAD_CERT]: - if self._channel_option[self.CONSENSUS_CERT_USE] or self._channel_option[self.TX_CERT_USE]: - logging.error("public key load type can't use cert") - util.exit_and_msg("public key load type can't use cert") - - try: - if self._channel_option[self.KEY_LOAD_TYPE] == conf.KeyLoadType.FILE_LOAD: - logging.info("key load type : file load") - logging.info(f"public file : {conf.CHANNEL_OPTION[self._channel][self.PUBLIC_PATH]}") - logging.info(f"private file : {conf.CHANNEL_OPTION[self._channel][self.PRIVATE_PATH]}") - - # load public key - with open(conf.CHANNEL_OPTION[self._channel][self.PUBLIC_PATH], "rb") as public: - public_bytes = public.read() - if conf.CHANNEL_OPTION[self._channel][self.LOAD_CERT]: - self.__load_cert(public_bytes) - else: - self.__load_public(public_bytes) - - # load private key - 
self.__load_private(pri_path=conf.CHANNEL_OPTION[self._channel][self.PRIVATE_PATH], - pri_pass=conf.CHANNEL_OPTION[self._channel][self.PRIVATE_PASSWORD]) - - elif self._channel_option[self.KEY_LOAD_TYPE] == conf.KeyLoadType.KMS_LOAD: - from loopchain.tools.kms_helper import KmsHelper - cert, private = KmsHelper().get_signature_cert_pair(conf.CHANNEL_OPTION[self._channel][self.KEY_ID]) - # KMS not support public key load - if conf.CHANNEL_OPTION[self._channel][self.LOAD_CERT]: - self.__load_cert(cert) - else: - raise Exception("KMS Load does't support public key load") - - self.__load_private_byte(private) - - elif self._channel_option[self.KEY_LOAD_TYPE] == KeyLoadType.RANDOM_TABLE_DERIVATION: - logging.info("key load type : random table derivation") - # Random Table derivation not support cert key load - if conf.CHANNEL_OPTION[self._channel][self.LOAD_CERT]: - raise Exception("KMS Load does't support public key load") - - self.__peer_pri = self.__key_derivation(rand_table) - self._load_public_from_object(self.__peer_pri.public_key()) - - else: - raise Exception(f"conf.KEY_LOAD_TYPE : {conf.CHANNEL_OPTION[channel][self.KEY_LOAD_TYPE]}" - f"\nkey load option is wrong") - - except Exception as e: - logging.error(e) - util.exit_and_msg(f"key load fail cause : {e}") - - def __load_public(self, public_bytes): - """load certificate - - :param public_bytes: der or pem format certificate - """ - try: - self._load_public_from_der(public_bytes) - except Exception as e: - self._load_public_from_pem(public_bytes) - - def __load_cert(self, cert_bytes: bytes): - """load certificate - - :param cert_bytes: der or pem format certificate - """ - try: - cert: Certificate = self._load_cert_from_der(cert_bytes) - except Exception as e: - cert: Certificate = self._load_cert_from_pem(cert_bytes) - - def __load_private(self, pri_path, pri_pass=None): - """인증서 로드 - - :param pri_path: 개인키 경로 - :param pri_pass: 개인키 패스워드 - :return: - """ - if isinstance(pri_pass, str): - pri_pass = pri_pass.encode() - # 인증서/개인키 로드 - with open(pri_path, "rb") as der: - private_bytes = der.read() - self.__load_private_byte(private_bytes, pri_pass) - - def __load_private_byte(self, private_bytes, private_pass=None): - """private load from bytes string - - :param private_bytes: private byte - :param private_pass: private password - :return: - """ - - try: - try: - self.__peer_pri = serialization.load_der_private_key(private_bytes, private_pass, default_backend()) - except Exception as e: - # try pem type private load - self.__peer_pri = serialization.load_pem_private_key(private_bytes, private_pass, default_backend()) - - except ValueError as e: - logging.exception(f"error {e}") - util.exit_and_msg("Invalid Password") - - # 키 쌍 검증 - sign = self.sign_data(b'TEST') - if self.verify_data(b'TEST', sign) is False: - util.exit_and_msg("Invalid Signature(Peer Certificate load test)") - - def set_peer_info(self, peer_id, peer_target, group_id, peer_type): - self.__peer_info = b''.join([peer_id.encode('utf-8'), - peer_target.encode('utf-8'), - group_id.encode('utf-8')]) + bytes([peer_type]) - - def sign_data(self, data, is_hash=False): - """인증서 개인키로 DATA 서명 - - :param data: 서명 대상 원문 - :param is_hash: when data is hashed True - :return: 서명 데이터 - """ - hash_algorithm = hashes.SHA256() - if is_hash: - hash_algorithm = utils.Prehashed(hash_algorithm) - if isinstance(data, str): - try: - data = binascii.unhexlify(data) - except Exception as e: - logging.error(f"hash data must hex string or bytes \n exception : {e}") - return None - - if not 
isinstance(data, (bytes, bytearray)): - logging.error(f"data must be bytes \n") - return None - - if isinstance(self.__peer_pri, ec.EllipticCurvePrivateKeyWithSerialization): - return self.__peer_pri.sign( - data, - ec.ECDSA(hash_algorithm)) - elif isinstance(self.__peer_pri, rsa.RSAPrivateKeyWithSerialization): - return self.__peer_pri.sign( - data, - padding.PKCS1v15(), - hash_algorithm - ) - else: - logging.error("Unknown PrivateKey Type : %s", type(self.__peer_pri)) - return None - - def generate_request_sign(self, rand_key): - """RequestPeer 서명을 생성한다. - - set_peer_info 함수가 우선 실행되어야 한다. - sign_peer(peer_id || peer_target || group_id || peet_type || rand_key) - :param rand_key: 서버로 부터 수신한 랜덤 - :return: 서명 - """ - tbs_data = self.__peer_info + bytes.fromhex(rand_key) - return self.sign_data(tbs_data) - - def get_token_time(self, token): - """Token의 유효시간을 검증하고 토큰을 검증하기 위한 데이터를 반환한다. - - :param token: 검증 대상 Token - :return: 검증 실패 시 None, 성공 시 토큰 검증을 위한 데이터 - """ - token_time = token[2:18] - token_date = int(token_time, 16) - current_date = int(datetime.datetime.now().timestamp() * 1000) - if current_date < token_date: - return bytes.fromhex(token_time) - - return None - - @staticmethod - def __key_derivation(rand_table): - """key derivation using rand_table and conf.FIRST_SEED conf.SECOND_SEED - - :param rand_table: - :return: private_key - """ - hash_value = rand_table[conf.FIRST_SEED] + rand_table[conf.SECOND_SEED] + conf.MY_SEED - return ec.derive_private_key(hash_value, ec.SECP256K1(), default_backend()) diff --git a/loopchain/peer/peer_service.py b/loopchain/peer/peer_service.py index 42802b663..a3661942d 100644 --- a/loopchain/peer/peer_service.py +++ b/loopchain/peer/peer_service.py @@ -26,10 +26,9 @@ from loopchain.blockchain import * from loopchain.container import RestService, CommonService from loopchain.peer import PeerInnerService, PeerOuterService -from loopchain.peer.icx_authorization import IcxAuthorization +from loopchain.peer.icx_authorization import Signer from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code from loopchain.rest_server import RestProxyServer -from loopchain.tools.signature_helper import PublicVerifier from loopchain.utils import loggers, command_arguments from loopchain.utils.message_queue import StubCollection @@ -234,7 +233,7 @@ def __run_rest_services(self, port): def __make_peer_id(self): """네트워크에서 Peer 를 식별하기 위한 UUID를 level db 에 생성한다. """ - self.__peer_id = IcxAuthorization(conf.LOOPCHAIN_DEFAULT_CHANNEL).address + self.__peer_id = Signer.from_channel(conf.LOOPCHAIN_DEFAULT_CHANNEL).address logger_preset = loggers.get_preset() logger_preset.peer_id = self.peer_id diff --git a/loopchain/tools/signature_helper.py b/loopchain/tools/signature_helper.py deleted file mode 100644 index 18258ea3e..000000000 --- a/loopchain/tools/signature_helper.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2018 ICON Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Signature Helper for Tx, Vote, Block Signature verify""" -import hashlib -import logging - -import binascii -from cryptography import x509 -from cryptography.exceptions import InvalidSignature -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization, hashes -from cryptography.hazmat.primitives.asymmetric import ec, utils, rsa, padding -from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey -from cryptography.x509 import Certificate -from secp256k1 import PrivateKey, PublicKey - -from loopchain import configure as conf - - -class PublicVerifier: - """provide signature verify function using public key""" - - # KEY OPTION JSON NAME - LOAD_CERT = "load_cert" - CONSENSUS_CERT_USE = "consensus_cert_use" - TX_CERT_USE = "tx_cert_use" - PUBLIC_PATH = "public_path" - PRIVATE_PATH = "private_path" - PRIVATE_PASSWORD = "private_password" - KEY_LOAD_TYPE = "key_load_type" - KEY_ID = "key_id" - - def __init__(self, channel): - """init members to None and set verify function you must run load_key function - - :param channel: using channel name - """ - - self._public_key: EllipticCurvePublicKey = None - self._cert: Certificate = None - self._public_der: bytes = None - self._cert_der: bytes = None - - self._channel = channel - self._channel_option = conf.CHANNEL_OPTION[self._channel] - - self._tx_verifier_load_function = None - self._consensus_verifier_load_function = None - - if self._channel_option[self.CONSENSUS_CERT_USE]: - self._consensus_verifier_load_function = self._load_cert_from_der - else: - self._consensus_verifier_load_function = self._load_public_from_der - - if self._channel_option[self.TX_CERT_USE]: - self._tx_verifier_load_function = self._load_cert_from_der - else: - self._tx_verifier_load_function = self._load_public_from_der - - def load_public_for_tx_verify(self, public): - """load public for tx signature verify - - :param public: der format public key or der format cert - :return: - """ - self._tx_verifier_load_function(public) - - def load_public_for_peer_verify(self, public): - """load public for peer signature verify - - :param public: der format public key or der format cert - :return: - """ - self._consensus_verifier_load_function(public) - - @property - def public_der(self): - if self._public_der is None: - self._public_der = self._public_key.public_bytes( - encoding=serialization.Encoding.DER, - format=serialization.PublicFormat.SubjectPublicKeyInfo - ) - return self._public_der - - @property - def cert_der(self): - if self._cert_der is None: - self._cert_der = self._cert.public_bytes( - encoding=serialization.Encoding.DER - ) - return self._cert_der - - @property - def tx_cert(self): - if self._channel_option[self.TX_CERT_USE]: - return self.cert_der - return self.public_der - - @property - def peer_cert(self): - if self._channel_option[self.TX_CERT_USE]: - return self.cert_der - return self.public_der - - def _load_public_from_der(self, public_der: bytes): - """load public key using der format public key - - :param public_der: der format public key - :raise ValueError: public_der format is wrong - """ - self._public_key = serialization.load_der_public_key( - public_der, - backend=default_backend() - ) - - def _load_public_from_object(self, public: EllipticCurvePublicKey): - """load public key using public object - - :param public: der format public key - :raise ValueError: public type is not EllipticCurvePublicKey - """ - if isinstance(public, EllipticCurvePublicKey): - self._public_key = 
public - else: - raise ValueError("public must EllipticCurvePublicKey Object") - - def _load_public_from_pem(self, public_pem: bytes): - """load public key using pem format public key - - :param public_pem: der format public key - :raise ValueError: public_der format is wrong - """ - self._public_key = serialization.load_pem_public_key( - public_pem, - backend=default_backend() - ) - - def _load_cert_from_der(self, cert_der): - cert: Certificate = x509.load_der_x509_certificate(cert_der, default_backend()) - self._cert = cert - self._public_key = cert.public_key() - - def _load_cert_from_pem(self, cert_pem): - cert: Certificate = x509.load_pem_x509_certificate(cert_pem, default_backend()) - self._cert = cert - self._public_key = cert.public_key() - - def verify_data(self, data, signature) -> bool: - """개인키로 서명한 데이터 검증 - - :param data: 서명 대상 원문 - :param signature: 서명 데이터 - :return: 서명 검증 결과(True/False) - """ - pub_key = self._public_key - return self.verify_data_with_publickey(public_key=pub_key, data=data, signature=signature) - - def verify_hash(self, digest, signature) -> bool: - """개인키로 서명한 해시 검증 - - :param digest: 서명 대상 해시 - :param signature: 서명 데이터 - :return: 서명 검증 결과(True/False) - """ - # if hex string - if isinstance(digest, str): - try: - digest = binascii.unhexlify(digest) - except Exception as e: - logging.warning(f"verify hash must hex or bytes {e}") - return False - - return self.verify_data_with_publickey(public_key=self._public_key, - data=digest, - signature=signature, - is_hash=True) - - @staticmethod - def verify_data_with_publickey(public_key, data: bytes, signature: bytes, is_hash: bool=False) -> bool: - """서명한 DATA 검증 - - :param public_key: 검증용 공개키 - :param data: 서명 대상 원문 - :param signature: 서명 데이터 - :param is_hash: 사전 hashed 여부(True/False - :return: 서명 검증 결과(True/False) - """ - hash_algorithm = hashes.SHA256() - if is_hash: - hash_algorithm = utils.Prehashed(hash_algorithm) - - if isinstance(public_key, ec.EllipticCurvePublicKeyWithSerialization): - try: - public_key.verify( - signature=signature, - data=data, - signature_algorithm=ec.ECDSA(hash_algorithm) - ) - return True - except InvalidSignature: - logging.debug("InvalidSignatureException_ECDSA") - else: - logging.debug("Invalid PublicKey Type : %s", type(public_key)) - - return False - - @staticmethod - def verify_data_with_publickey_rsa(public_key, data: bytes, signature: bytes, is_hash: bool=False) -> bool: - """서명한 DATA 검증 - - :param public_key: 검증용 공개키 - :param data: 서명 대상 원문 - :param signature: 서명 데이터 - :param is_hash: 사전 hashed 여부(True/False - :return: 서명 검증 결과(True/False) - """ - hash_algorithm = hashes.SHA256() - if is_hash: - hash_algorithm = utils.Prehashed(hash_algorithm) - - if isinstance(public_key, rsa.RSAPublicKeyWithSerialization): - try: - public_key.verify( - signature, - data, - padding.PKCS1v15(), - hash_algorithm - ) - return True - except InvalidSignature: - logging.debug("InvalidSignatureException_RSA") - else: - logging.debug("Unknown PublicKey Type : %s", type(public_key)) - - return False - - -class IcxVerifier: - _pri = PrivateKey() - - def __init__(self): - self._address: str = None - self._serialize_pubkey: bytes = None - - @property - def address(self): - return self._address - - @property - def peer_cert(self): - return self._serialize_pubkey - - def _init_using_pub(self, pubkey: bytes): - self._serialize_pubkey = pubkey - hash_pub = hashlib.sha3_256(self._serialize_pubkey[1:]).hexdigest() - self._address = f"hx{hash_pub[-40:]}" - - def init_and_verify_address(self, pubkey: bytes, 
address: str): - self._init_using_pub(pubkey) - if self._address != address: - raise ValueError(f"Invalid Address : {address}") - - def verify_data(self, origin_data: bytes, signature: bytes): - return self.__verify_signature(origin_data, signature, False) - - def verify_hash(self, origin_data, signature): - return self.__verify_signature(origin_data, signature, True) - - def __verify_signature(self, origin_data: bytes, signature: bytes, is_hash): - try: - if is_hash: - origin_data = binascii.unhexlify(origin_data) - origin_signature, recover_code = signature[:-1], signature[-1] - recoverable_sig = self._pri.ecdsa_recoverable_deserialize(origin_signature, recover_code) - pub = self._pri.ecdsa_recover(origin_data, - recover_sig=recoverable_sig, - raw=is_hash, - digest=hashlib.sha3_256) - extract_pub = PublicKey(pub).serialize(compressed=False) - return self._serialize_pubkey == extract_pub - except Exception: - logging.debug(f"signature verify fail : {origin_data} {signature}") - return False - - -class PublicVerifierContainer: - """PublicVerifier Container for often usaged""" - - __public_verifier = {} - - @classmethod - def get_public_verifier(cls, channel, serialized_public: bytes) -> PublicVerifier: - try: - channel_public_verifier_list = cls.__public_verifier[channel] - except KeyError as e: - cls.__public_verifier[channel] = {} - return cls.__create_public_verifier(channel, serialized_public) - else: - try: - return channel_public_verifier_list[serialized_public] - except KeyError as e: - return cls.__create_public_verifier(channel, serialized_public) - - @classmethod - def __create_public_verifier(cls, channel, serialized_public: bytes) -> PublicVerifier: - """create Public Verifier use serialized_public deserialize public key - - :param serialized_public: der public key - :return: PublicVerifier - """ - - public_verifier = PublicVerifier(channel) - public_verifier.load_public_for_tx_verify(serialized_public) - cls.__public_verifier[channel][serialized_public] = public_verifier - - return public_verifier diff --git a/loopchain/utils/__init__.py b/loopchain/utils/__init__.py index 0b748dec9..8ed099cb4 100644 --- a/loopchain/utils/__init__.py +++ b/loopchain/utils/__init__.py @@ -486,9 +486,6 @@ def is_hex(s): return re.fullmatch(r"^(0x)?[0-9a-f]{64}$", s or "") is not None -def trim_hex(s): - return s.split("0x")[1] if s.startswith("0x") else s - # ------------------- data utils ---------------------------- diff --git a/testcase/unittest/test_util.py b/testcase/unittest/test_util.py index ef4ff5daa..f3347117d 100644 --- a/testcase/unittest/test_util.py +++ b/testcase/unittest/test_util.py @@ -32,7 +32,7 @@ from loopchain.baseservice import ObjectManager, StubManager, Block, CommonSubprocess from loopchain.blockchain import Transaction, TransactionBuilder, TransactionVersioner, Address from loopchain.components import SingletonMetaClass -from loopchain.peer import PeerService, IcxAuthorization +from loopchain.peer import PeerService, Signer from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc from loopchain.radiostation import RadioStationService from loopchain.utils import loggers @@ -233,13 +233,13 @@ def clean_up_mq(): os.system("rabbitmqctl start_app") -def create_basic_tx(peer_auth: IcxAuthorization) -> Transaction: +def create_basic_tx(peer_auth: Signer) -> Transaction: """ :param peer_auth: :return: transaction """ tx_builder = TransactionBuilder.new("0x3", TransactionVersioner()) - tx_builder.private_key = peer_auth.peer_private_key + tx_builder.private_key = 
peer_auth.private_key tx_builder.to_address = Address("hx3f376559204079671b6a8df481c976e7d51b3c7c") tx_builder.value = 1 tx_builder.step_limit = 100000000 @@ -247,9 +247,9 @@ def create_basic_tx(peer_auth: IcxAuthorization) -> Transaction: return tx_builder.build() -def create_default_peer_auth() -> IcxAuthorization: +def create_default_peer_auth() -> Signer: channel = list(conf.CHANNEL_OPTION)[0] - peer_auth = IcxAuthorization(channel) + peer_auth = Signer(channel) return peer_auth From f0208ba83ec550e084c214846a89e2106cac629f Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Fri, 18 Jan 2019 18:59:18 +0900 Subject: [PATCH 03/23] [LC-126] Remove cert from PeerInfo --- loopchain/baseservice/peer_object.py | 31 ++-------------------- loopchain/peer/__init__.py | 1 - loopchain/peer/consensus_siever.py | 2 +- loopchain/peer/icx_authorization.py | 28 +++++++++++-------- loopchain/peer/peer_service.py | 2 +- loopchain/protos/loopchain.proto | 13 ++++----- loopchain/radiostation/rs_outer_service.py | 2 +- 7 files changed, 27 insertions(+), 52 deletions(-) diff --git a/loopchain/baseservice/peer_object.py b/loopchain/baseservice/peer_object.py index dd65ed7f2..7d0bd50b1 100644 --- a/loopchain/baseservice/peer_object.py +++ b/loopchain/baseservice/peer_object.py @@ -20,7 +20,6 @@ from loopchain import configure as conf from loopchain.baseservice import StubManager from loopchain.protos import loopchain_pb2_grpc -from loopchain.tools.signature_helper import PublicVerifier, IcxVerifier class PeerStatus(IntEnum): @@ -32,8 +31,8 @@ class PeerStatus(IntEnum): class PeerInfo: """Peer Object""" - def __init__(self, peer_id: str, group_id: str, target: str = "", status: PeerStatus = PeerStatus.unknown, - cert: bytes = b"", order: int = 0): + def __init__(self, peer_id: str, group_id: str, + target: str = "", status: PeerStatus = PeerStatus.unknown, order: int = 0): """ create PeerInfo if connected peer status PeerStatus.connected @@ -41,7 +40,6 @@ def __init__(self, peer_id: str, group_id: str, target: str = "", status: PeerSt :param group_id: peer's group_id :param target: grpc target info default "" :param status: connect status if db loaded peer to PeerStatus.unknown default "" - :param cert: peer's signature cert default b"" :param order: :return: """ @@ -53,8 +51,6 @@ def __init__(self, peer_id: str, group_id: str, target: str = "", status: PeerSt self.__status_update_time = datetime.datetime.now() self.__status = status - self.__cert: bytes = cert - @property def peer_id(self) -> str: return self.__peer_id @@ -79,14 +75,6 @@ def target(self): def target(self, target): self.__target = target - @property - def cert(self) -> bytes: - return self.__cert - - @cert.setter - def cert(self, cert): - self.__cert = cert - @property def status(self): return self.__status @@ -120,10 +108,6 @@ def __init__(self, channel: str, peer_info: PeerInfo): self.__create_live_data() def __create_live_data(self): - """create live data that can't serialized - - :param channel: channel_name - """ try: self.__stub_manager = StubManager(self.__peer_info.target, loopchain_pb2_grpc.PeerServiceStub, @@ -131,13 +115,6 @@ def __create_live_data(self): except Exception as e: logging.exception(f"Create Peer create stub_manager fail target : {self.__peer_info.target} \n" f"exception : {e}") - try: - self.__cert_verifier = IcxVerifier() - self.__cert_verifier.init_and_verify_address(pubkey=self.peer_info.cert, - address=self.peer_info.peer_id) - except Exception as e: - logging.exception(f"create cert verifier error : {self.__channel} 
{self.__peer_info.cert} \n" - f"exception {e}") @property def peer_info(self)-> PeerInfo: @@ -147,10 +124,6 @@ def peer_info(self)-> PeerInfo: def stub_manager(self) -> StubManager: return self.__stub_manager - @property - def cert_verifier(self): - return self.__cert_verifier - @property def no_response_count(self): return self.__no_response_count diff --git a/loopchain/peer/__init__.py b/loopchain/peer/__init__.py index 84ca86165..8aff946be 100644 --- a/loopchain/peer/__init__.py +++ b/loopchain/peer/__init__.py @@ -19,5 +19,4 @@ from .peer_outer_service import * from .channel_manager import * from .peer_service import * -from .peer_authorization import * from .consensus_base import * diff --git a/loopchain/peer/consensus_siever.py b/loopchain/peer/consensus_siever.py index 9effeb53d..dd74e8126 100644 --- a/loopchain/peer/consensus_siever.py +++ b/loopchain/peer/consensus_siever.py @@ -89,7 +89,7 @@ async def consensus(self): block_builder.height = last_block.header.height + 1 block_builder.prev_hash = last_block.header.hash block_builder.next_leader = next_leader - block_builder.private_key = ObjectManager().channel_service.peer_auth.private_key + block_builder.peer_private_key = ObjectManager().channel_service.peer_auth.private_key block_builder.confirm_prev_block = vote_result or (self._made_block_count > 0) candidate_block = block_builder.build() diff --git a/loopchain/peer/icx_authorization.py b/loopchain/peer/icx_authorization.py index 973e9d86d..d4bf6ae7d 100644 --- a/loopchain/peer/icx_authorization.py +++ b/loopchain/peer/icx_authorization.py @@ -28,16 +28,16 @@ class SignVerifier: def __init__(self): self.address: str = None + def verify_address(self, pubkey: bytes): + return self.address_from_pubkey(pubkey) == self.address + def verify_data(self, origin_data: bytes, signature: bytes): return self.verify_signature(origin_data, signature, False) def verify_hash(self, origin_data, signature): return self.verify_signature(origin_data, signature, True) - def verify_address(self, pubkey: bytes): - return self.address_from_pubkey(pubkey) != self.address - - def verify_signature(self, origin_data: bytes, signature: bytes, is_hash): + def verify_signature(self, origin_data: bytes, signature: bytes, is_hash: bool): try: if is_hash: origin_data = binascii.unhexlify(origin_data) @@ -85,7 +85,13 @@ def __init__(self): super().__init__() self.private_key: PrivateKey = None - def sign(self, data, is_hash=False): + def sign_data(self, data): + return self.sign(data, False) + + def sign_hash(self, data): + return self.sign(data, True) + + def sign(self, data, is_hash: bool): if is_hash: if isinstance(data, str): try: @@ -99,11 +105,11 @@ def sign(self, data, is_hash=False): logging.error(f"data must be bytes \n") return None - signature = self.private_key.ecdsa_sign_recoverable(msg=data, - raw=is_hash, - digest=hashlib.sha3_256) - serialized_sig = self._pri.ecdsa_recoverable_serialize(signature) - return b''.join([serialized_sig[0], bytes([serialized_sig[1]])]) + raw_sig = self.private_key.ecdsa_sign_recoverable(msg=data, + raw=is_hash, + digest=hashlib.sha3_256) + serialized_sig, recover_id = self.private_key.ecdsa_recoverable_serialize(raw_sig) + return serialized_sig + bytes((recover_id, )) @classmethod def from_channel(cls, channel: str): @@ -146,7 +152,7 @@ def from_prikey(cls, prikey: bytes): auth.address = cls.address_from_prikey(prikey) # verify - sign = auth.sign(b'TEST') + sign = auth.sign_data(b'TEST') if auth.verify_data(b'TEST', sign) is False: raise ValueError("Invalid 
Signature(Peer Certificate load test)") return auth diff --git a/loopchain/peer/peer_service.py b/loopchain/peer/peer_service.py index a3661942d..c5ea3ff92 100644 --- a/loopchain/peer/peer_service.py +++ b/loopchain/peer/peer_service.py @@ -249,7 +249,7 @@ def __get_use_kms(): if conf.GRPC_SSL_KEY_LOAD_TYPE == conf.KeyLoadType.KMS_LOAD: return True for value in conf.CHANNEL_OPTION.values(): - if value[PublicVerifier.KEY_LOAD_TYPE] == conf.KeyLoadType.KMS_LOAD: + if value["key_load_type"] == conf.KeyLoadType.KMS_LOAD: return True return False diff --git a/loopchain/protos/loopchain.proto b/loopchain/protos/loopchain.proto index 944f024c4..3eaca8b59 100644 --- a/loopchain/protos/loopchain.proto +++ b/loopchain/protos/loopchain.proto @@ -357,10 +357,9 @@ message PeerRequest { required string peer_target = 3; required string group_id = 4; required PeerType peer_type = 5; - optional bytes cert = 6; - optional int32 peer_order = 7; - optional bytes peer_object = 8; - optional NodeType node_type = 9; + optional int32 peer_order = 6; + optional bytes peer_object = 7; + optional NodeType node_type = 8; } message ConnectPeerRequest { @@ -368,9 +367,8 @@ message ConnectPeerRequest { optional string channel = 2; required string peer_target = 3; required string group_id = 4; - optional bytes cert = 5; - optional int32 peer_order = 6; - optional bytes peer_object = 7; + optional int32 peer_order = 5; + optional bytes peer_object = 6; } // RadioStation이 Peer에게 줄 정보들 @@ -385,7 +383,6 @@ message GetChannelInfosRequest { required string peer_id = 1; required string peer_target = 2; required string group_id = 3; - optional bytes cert = 4; } message GetChannelInfosReply { diff --git a/loopchain/radiostation/rs_outer_service.py b/loopchain/radiostation/rs_outer_service.py index 776559147..d651a70e3 100644 --- a/loopchain/radiostation/rs_outer_service.py +++ b/loopchain/radiostation/rs_outer_service.py @@ -235,7 +235,7 @@ def ConnectPeer(self, request: loopchain_pb2.ConnectPeerRequest, context): f"\nPeer_target : {request.peer_target}" f"\nChannel : {request.channel}") - peer = PeerInfo(request.peer_id, request.group_id, request.peer_target, PeerStatus.unknown, cert=request.cert) + peer = PeerInfo(request.peer_id, request.group_id, request.peer_target, PeerStatus.unknown) util.logger.spam(f"service::ConnectPeer try add_peer") From 1fb3b05731b2cfdf2ee7f8e857ffa3c9cd3e73a4 Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Fri, 18 Jan 2019 19:07:28 +0900 Subject: [PATCH 04/23] [LC-126] Move locations of hashing and signature. 
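After this patch the hashing and signature helpers live under loopchain.crypto; a quick sketch of the new import paths (the former loopchain.blockchain.hashing and loopchain.peer.icx_authorization paths no longer exist):

    from loopchain.crypto.hashing import build_hash_generator
    from loopchain.crypto.signature import Signer, SignVerifier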
--- cli_tools/icx_test/icx_wallet.py | 4 +--- loopchain/blockchain/__init__.py | 1 - loopchain/blockchain/transactions/transaction_builder.py | 2 +- loopchain/blockchain/transactions/transaction_serializer.py | 2 +- loopchain/blockchain/transactions/transaction_verifier.py | 2 +- loopchain/channel/channel_service.py | 2 +- loopchain/consensus/vote_message.py | 2 +- loopchain/crypto/__init__.py | 0 loopchain/{blockchain => crypto}/hashing/__init__.py | 0 loopchain/{blockchain => crypto}/hashing/hash_generator.py | 0 .../{blockchain => crypto}/hashing/hash_origin_generator.py | 0 loopchain/{peer/icx_authorization.py => crypto/signature.py} | 0 loopchain/peer/peer_outer_service.py | 1 + loopchain/peer/peer_service.py | 4 ++-- testcase/unittest/test_crypto.py | 2 +- 15 files changed, 10 insertions(+), 12 deletions(-) create mode 100644 loopchain/crypto/__init__.py rename loopchain/{blockchain => crypto}/hashing/__init__.py (100%) rename loopchain/{blockchain => crypto}/hashing/hash_generator.py (100%) rename loopchain/{blockchain => crypto}/hashing/hash_origin_generator.py (100%) rename loopchain/{peer/icx_authorization.py => crypto/signature.py} (100%) diff --git a/cli_tools/icx_test/icx_wallet.py b/cli_tools/icx_test/icx_wallet.py index 887f0b302..d15fe6d6d 100644 --- a/cli_tools/icx_test/icx_wallet.py +++ b/cli_tools/icx_test/icx_wallet.py @@ -2,14 +2,12 @@ import base64 import hashlib import logging -import binascii import random -import sys from secp256k1 import PrivateKey, PublicKey from loopchain import utils, configure as conf from loopchain.blockchain import Hash32 -from loopchain.blockchain.hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator ICX_FACTOR = 10 ** 18 ICX_FEE = 0.01 diff --git a/loopchain/blockchain/__init__.py b/loopchain/blockchain/__init__.py index 683347a02..ee953c249 100644 --- a/loopchain/blockchain/__init__.py +++ b/loopchain/blockchain/__init__.py @@ -16,7 +16,6 @@ from .exception import * from .types import * from .score_base import * -from .hashing import * from .transactions import * from .blocks import * from .blockchain import * diff --git a/loopchain/blockchain/transactions/transaction_builder.py b/loopchain/blockchain/transactions/transaction_builder.py index f24e3f78f..b1a71b362 100644 --- a/loopchain/blockchain/transactions/transaction_builder.py +++ b/loopchain/blockchain/transactions/transaction_builder.py @@ -3,7 +3,7 @@ from abc import abstractmethod, ABC from typing import TYPE_CHECKING from .. import Signature, ExternalAddress, Hash32 -from ..hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator if TYPE_CHECKING: from secp256k1 import PrivateKey diff --git a/loopchain/blockchain/transactions/transaction_serializer.py b/loopchain/blockchain/transactions/transaction_serializer.py index ac504b1e3..700ebe78f 100644 --- a/loopchain/blockchain/transactions/transaction_serializer.py +++ b/loopchain/blockchain/transactions/transaction_serializer.py @@ -1,6 +1,6 @@ from abc import abstractmethod, ABC from typing import TYPE_CHECKING -from ..hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator if TYPE_CHECKING: from . 
import TransactionVersioner diff --git a/loopchain/blockchain/transactions/transaction_verifier.py b/loopchain/blockchain/transactions/transaction_verifier.py index 537c9d4e0..60b3f6bba 100644 --- a/loopchain/blockchain/transactions/transaction_verifier.py +++ b/loopchain/blockchain/transactions/transaction_verifier.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING from secp256k1 import PublicKey, PrivateKey -from ..hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator from .. import Hash32, ExternalAddress if TYPE_CHECKING: from . import Transaction diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index df6062f42..5cf5f7729 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -33,7 +33,7 @@ from loopchain.channel.channel_statemachine import ChannelStateMachine from loopchain.consensus import Consensus, Acceptor, Proposer from loopchain.peer import BlockManager -from loopchain.peer.icx_authorization import Signer +from loopchain.crypto.signature import Signer from loopchain.protos import loopchain_pb2_grpc, message_code, loopchain_pb2 from loopchain.utils import loggers, command_arguments from loopchain.utils.icon_service import convert_params, ParamType, response_to_json_query diff --git a/loopchain/consensus/vote_message.py b/loopchain/consensus/vote_message.py index 542b61637..60811df47 100644 --- a/loopchain/consensus/vote_message.py +++ b/loopchain/consensus/vote_message.py @@ -17,7 +17,7 @@ from enum import IntEnum -from loopchain.blockchain.hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator class VoteMessageType(IntEnum): diff --git a/loopchain/crypto/__init__.py b/loopchain/crypto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/loopchain/blockchain/hashing/__init__.py b/loopchain/crypto/hashing/__init__.py similarity index 100% rename from loopchain/blockchain/hashing/__init__.py rename to loopchain/crypto/hashing/__init__.py diff --git a/loopchain/blockchain/hashing/hash_generator.py b/loopchain/crypto/hashing/hash_generator.py similarity index 100% rename from loopchain/blockchain/hashing/hash_generator.py rename to loopchain/crypto/hashing/hash_generator.py diff --git a/loopchain/blockchain/hashing/hash_origin_generator.py b/loopchain/crypto/hashing/hash_origin_generator.py similarity index 100% rename from loopchain/blockchain/hashing/hash_origin_generator.py rename to loopchain/crypto/hashing/hash_origin_generator.py diff --git a/loopchain/peer/icx_authorization.py b/loopchain/crypto/signature.py similarity index 100% rename from loopchain/peer/icx_authorization.py rename to loopchain/crypto/signature.py diff --git a/loopchain/peer/peer_outer_service.py b/loopchain/peer/peer_outer_service.py index a0dc3d90c..cc1183578 100644 --- a/loopchain/peer/peer_outer_service.py +++ b/loopchain/peer/peer_outer_service.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""gRPC service for Peer Outer Service""" import asyncio +import copy import datetime from functools import partial diff --git a/loopchain/peer/peer_service.py b/loopchain/peer/peer_service.py index c5ea3ff92..7ef9ca443 100644 --- a/loopchain/peer/peer_service.py +++ b/loopchain/peer/peer_service.py @@ -26,8 +26,8 @@ from loopchain.blockchain import * from loopchain.container import RestService, CommonService from loopchain.peer import PeerInnerService, PeerOuterService -from loopchain.peer.icx_authorization import Signer -from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code +from loopchain.crypto.signature import Signer +from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc from loopchain.rest_server import RestProxyServer from loopchain.utils import loggers, command_arguments from loopchain.utils.message_queue import StubCollection diff --git a/testcase/unittest/test_crypto.py b/testcase/unittest/test_crypto.py index ec4d6bff4..85dd216dd 100644 --- a/testcase/unittest/test_crypto.py +++ b/testcase/unittest/test_crypto.py @@ -34,7 +34,7 @@ from loopchain.utils import loggers from loopchain.blockchain import Hash32, TransactionSerializer, TransactionVersioner -from loopchain.blockchain.hashing import build_hash_generator +from loopchain.crypto.hashing import build_hash_generator import testcase.unittest.test_util as test_util From 145f6c9578cfac22eddd51e8af293c5d91c8ae82 Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Tue, 22 Jan 2019 10:47:05 +0900 Subject: [PATCH 05/23] [LC-126] Modify signature.py --- loopchain/crypto/signature.py | 109 ++++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 37 deletions(-) diff --git a/loopchain/crypto/signature.py b/loopchain/crypto/signature.py index d4bf6ae7d..ddf784238 100644 --- a/loopchain/crypto/signature.py +++ b/loopchain/crypto/signature.py @@ -16,6 +16,7 @@ import binascii import hashlib import logging +from typing import Union from asn1crypto import keys from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization @@ -53,31 +54,75 @@ def verify_signature(self, origin_data: bytes, signature: bytes, is_hash: bool): logging.debug(f"signature verify fail : {origin_data} {signature}") return False + @classmethod + def address_from_pubkey(cls, pubkey: bytes): + hash_pub = hashlib.sha3_256(pubkey[1:]).hexdigest() + return f"hx{hash_pub[-40:]}" + + @classmethod + def address_from_prikey(cls, prikey: bytes): + pubkey = PrivateKey(prikey).pubkey.serialize(compressed=False) + return cls.address_from_pubkey(pubkey) + @classmethod def from_address(cls, address: str): verifier = SignVerifier() verifier.address = address return verifier + @classmethod + def from_channel(cls, channel: str): + from loopchain import configure as conf + + public_file = conf.CHANNEL_OPTION[channel]["public_path"] + return cls.from_pubkey_file(public_file) + + @classmethod + def from_pubkey_file(cls, pubkey_file: str): + with open(pubkey_file, "rb") as der: + pubkey = der.read() + return cls.from_pubkey(pubkey) + @classmethod def from_pubkey(cls, pubkey: bytes): address = cls.address_from_pubkey(pubkey) return cls.from_address(address) @classmethod - def from_prikey(cls, prikey: bytes): - address = cls.address_from_prikey(prikey) - return cls.from_address(address) + def from_prikey_file(cls, prikey_file: str, password: Union[str, bytes]): + with open(prikey_file, "rb") as der: + private_bytes = der.read() - @classmethod - def address_from_pubkey(cls, pubkey: bytes): - hash_pub = 
hashlib.sha3_256(pubkey[1:]).hexdigest() - return f"hx{hash_pub[-40:]}" + if isinstance(password, str): + password = password.encode() + try: + try: + temp_private = serialization \ + .load_der_private_key(private_bytes, + password, + default_backend()) + except Exception as e: + # try pem type private load + temp_private = serialization \ + .load_pem_private_key(private_bytes, + password, + default_backend()) + except Exception as e: + raise ValueError("Invalid Password(Peer Certificate load test)") + + no_pass_private = temp_private.private_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption() + ) + key_info = keys.PrivateKeyInfo.load(no_pass_private) + prikey = long_to_bytes(key_info['private_key'].native['private_key']) + return cls.from_prikey(prikey) @classmethod - def address_from_prikey(cls, prikey: bytes): - pubkey = PrivateKey(prikey).pubkey.serialize(compressed=False) - return cls.address_from_pubkey(pubkey) + def from_prikey(cls, prikey: bytes): + address = cls.address_from_prikey(prikey) + return cls.from_address(address) class Signer(SignVerifier): @@ -111,39 +156,29 @@ def sign(self, data, is_hash: bool): serialized_sig, recover_id = self.private_key.ecdsa_recoverable_serialize(raw_sig) return serialized_sig + bytes((recover_id, )) + @classmethod + def from_address(cls, address: str): + raise TypeError("Cannot create `Signer` from address") + @classmethod def from_channel(cls, channel: str): from loopchain import configure as conf - with open(conf.CHANNEL_OPTION[channel]["private_path"], "rb") as der: - private_bytes = der.read() - private_pass = conf.CHANNEL_OPTION[channel]["private_password"] + prikey_file = conf.CHANNEL_OPTION[channel]["private_path"] + password = conf.CHANNEL_OPTION[channel]["private_password"] + return cls.from_prikey_file(prikey_file, password) - if isinstance(private_pass, str): - private_pass = private_pass.encode() - try: - try: - temp_private = serialization \ - .load_der_private_key(private_bytes, - private_pass, - default_backend()) - except Exception as e: - # try pem type private load - temp_private = serialization \ - .load_pem_private_key(private_bytes, - private_pass, - default_backend()) - except Exception as e: - raise ValueError("Invalid Password(Peer Certificate load test)") + @classmethod + def from_pubkey(cls, pubkey: bytes): + raise TypeError("Cannot create `Signer` from pubkey") - no_pass_private = temp_private.private_bytes( - encoding=serialization.Encoding.DER, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption() - ) - key_info = keys.PrivateKeyInfo.load(no_pass_private) - prikey = long_to_bytes(key_info['private_key'].native['private_key']) - return cls.from_prikey(prikey) + @classmethod + def from_pubkey_file(cls, pubkey_file: str): + raise TypeError("Cannot create `Signer` from pubkey file") + + @classmethod + def from_prikey_file(cls, prikey_file: str, password: Union[str, bytes]): + return super().from_prikey_file(prikey_file, password) @classmethod def from_prikey(cls, prikey: bytes): From fca305b2dc48e4bcf0a5766935c148457d3ae3b7 Mon Sep 17 00:00:00 2001 From: Jiyun Park Date: Tue, 22 Jan 2019 17:43:25 +0900 Subject: [PATCH 06/23] [LC-127] update ctz url in configuration and guide for testicx --- README.md | 26 +++++++++++++++----------- loopchain/configure_default.py | 4 ++-- run_mainnet_citizen.sh | 21 --------------------- run_testnet_citizen.sh | 21 --------------------- 
stop_mainnet_citizen.sh | 19 ------------------- stop_testnet_citizen.sh | 19 ------------------- 6 files changed, 17 insertions(+), 93 deletions(-) delete mode 100755 run_mainnet_citizen.sh delete mode 100755 run_testnet_citizen.sh delete mode 100755 stop_mainnet_citizen.sh delete mode 100755 stop_testnet_citizen.sh diff --git a/README.md b/README.md index 15f597702..cf90d7a85 100644 --- a/README.md +++ b/README.md @@ -269,7 +269,7 @@ Total supply of ICX in hex: 0x2961fff8ca4a62327800000 Total supply of ICX in decimal: 800460000000000000000000000 ``` -* Create a Keystore +* Create an account (Skip this if you already have testnet account.) Create a keystore file in the given path. Generate a private and public key pair using secp256k1 library. @@ -312,7 +312,10 @@ It will create new keystore file like this: } ``` -> For there's no balance at that address, you need to request some testnet icx to your address. The instruction and the link to request testnet icx will be updated soon. +For there's no balance on new address, you need to request some testnet icx to it. **Please refer to [here](https://github.com/icon-project/icon-project.github.io/blob/master/docs/icon_network.md#testnet-for-exchanges) for test icx and detailed ICON testnet network information.** +Please note that the `Testnet node url` of your citizen node is `https://test-ctz.solidwallet.io` when sending the request email. + +If you want to load and view your testnet account on ICONex Chrome extension, please refer [here](https://github.com/icon-project/icon-project.github.io/blob/master/docs/icon_network.md#how-to-change-network-in-iconex-chrome-extension). * get balance @@ -323,8 +326,8 @@ usage: tbears balance [-h] [-u URI] [-c CONFIG] address (venv) $ tbears balance hx63499c4efc26c9370f6d68132c116d180d441266 // Result -balance in hex: 0xde0b6b3a7640000 -balance in decimal: 1000000000000000000 +balance in hex: {your balance in hex} +balance in decimal: {your balance in decimal} ``` * Send transaction @@ -374,7 +377,7 @@ The address to which icx is sent(`to`) is the address the ICON developers usuall Example ```bash -(venv) $ tbears sendtx -k my_keystore.json send.json +(venv) $ tbears sendtx -k my_keystore.json sendtx_testnet.json input your keystore password: @@ -420,7 +423,7 @@ This method returns the last block the Citizen node has currently synced. usage: tbears lastblock [-h] [-u URI] [-c CONFIG] // Example -(venv) $ tbears lastblock -u http://127.0.0.1:9100/api/v3 // Example (default uri: http://localhost:9000/api/v3) +(venv) $ tbears lastblock -u http://127.0.0.1:9100/api/v3 // result block info : { @@ -514,14 +517,15 @@ Total supply of ICX in hex: 0x2961fff8ca4a62327800000 Total supply of ICX in decimal: 800460000000000000000000000 ``` -* Create your account on Mainnet if you don't have one. -For Mainnet, we recommend you to create your account on Official ICONex application. +* To send transaction on Mainnet, you need an ICON account and a balance. If you don't have on, please create your account on official ICONex application as guide below. 1. Go to our website at https://icon.foundation 2. Click ‘Wallet’ button on the top 3. Move to Chrome extension page (https://chrome.google.com/webstore/detail/iconex-beta/flpiciilemghbmfalicajoolhkkenfel?hl=en) 4. 
Click “Add on +CHROME” button on the upper right corner +**For detailed ICON mainnet network information, please refer to [here](https://github.com/icon-project/icon-project.github.io/blob/master/docs/icon_network.md#mainnet).** + * get balance ```bash @@ -531,13 +535,13 @@ usage: tbears balance [-h] [-u URI] [-c CONFIG] address (venv) $ tbears balance -u http://127.0.0.1:9100/api/v3 hx63499c4efc26c9370f6d68132c116d180d441266 // Result -balance in hex: 0xde0b6b3a7640000 -balance in decimal: 1000000000000000000 +balance in hex: {your balance in hex} +balance in decimal: {your balance in decimal} ``` * Send transaction -Now that you have received a sufficient amount of icx, you can use it to send transactions. +If you have sufficient amount of icx, you can use it to send transactions. ``` usage: tbears sendtx [-h] [-u URI] [-k KEYSTORE] [-c CONFIG] json_file diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index c7c1eecfa..56d534b90 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -432,8 +432,8 @@ def is_support_node_function(cls, node_function, node_type): #################### # ICON #### #################### -URL_CITIZEN_TESTNET = 'https://int-test-ctz.solidwallet.io' -URL_CITIZEN_MAINNET = 'https://int-ctz.solidwallet.io' +URL_CITIZEN_TESTNET = 'https://test-ctz.solidwallet.io' +URL_CITIZEN_MAINNET = 'https://ctz.solidwallet.io' CONF_PATH_LOOPCHAIN_TESTNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/testnet/loopchain_conf.json') CONF_PATH_LOOPCHAIN_MAINNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/mainnet/loopchain_conf.json') CONF_PATH_ICONSERVICE_DEV = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/develop/iconservice_conf.json') diff --git a/run_mainnet_citizen.sh b/run_mainnet_citizen.sh deleted file mode 100755 index 8c67c46b3..000000000 --- a/run_mainnet_citizen.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -export REDIRECT_PROTOCOL=https -PID_FILE="mainnet_citizen.pid" - -if [ -f ${PID_FILE} ]; then - echo "remove ${PID_FILE}" - rm -f ${PID_FILE} -fi - -touch ${PID_FILE} - -echo "Run loopchain for citizen start!" -./loopchain.py citizen -d -r https://int-ctz.solidwallet.io -o ./conf/mainnet/loopchain_conf.json & -echo $! > $PID_FILE - -echo "Run iconservice for citizen start!" -iconservice start -c ./conf/mainnet/iconservice_conf.json & - -echo "Run iconrpcserver for citizen start!" -iconrpcserver start -p 9000 -c conf/mainnet/iconrpcserver_conf.json & diff --git a/run_testnet_citizen.sh b/run_testnet_citizen.sh deleted file mode 100755 index 2f3d04a07..000000000 --- a/run_testnet_citizen.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -export REDIRECT_PROTOCOL=https -PID_FILE="testnet_citizen.pid" - -if [ -f ${PID_FILE} ]; then - echo "remove ${PID_FILE}" - rm -f ${PID_FILE} -fi - -touch $PID_FILE - -echo "Run loopchain for citizen start!" -./loopchain.py citizen -r https://int-test-ctz.solidwallet.io -o ./conf/testnet/loopchain_conf.json & -echo $! > $PID_FILE - -echo "Run iconservice for citizen start!" -iconservice start -c ./conf/testnet/iconservice_conf.json & - -echo "Run iconrpcserver for citizen start!" 
-iconrpcserver start -p 9100 -c conf/testnet/iconrpcserver_conf.json & diff --git a/stop_mainnet_citizen.sh b/stop_mainnet_citizen.sh deleted file mode 100755 index f7d72e15d..000000000 --- a/stop_mainnet_citizen.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -echo "Stop loopchain Processes" - -PID_FILE="mainnet_citizen.pid" -if [ -f ${PID_FILE} ]; then - echo "Kill loopchain on mainnet process" - PID=`cat ${PID_FILE}` - pgrep -P ${PID} | xargs -I ARG kill -9 ARG - kill ${PID} - rm -f ${PID_FILE} -fi - -echo "Stopping iconservice..." -iconservice stop -c ./conf/mainnet/iconservice_conf.json - -echo "Stopping iconrpcserver..." -iconrpcserver stop -p 9100 -c ./conf/mainnet/iconrpcserver_conf.json -pkill -f gunicorn diff --git a/stop_testnet_citizen.sh b/stop_testnet_citizen.sh deleted file mode 100755 index ff66d0f7f..000000000 --- a/stop_testnet_citizen.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -echo "Stop loopchain Processes" - -PID_FILE="testnet_citizen.pid" -if [ -f ${PID_FILE} ]; then - echo "Kill loopchain on testnet process" - PID=`cat ${PID_FILE}` - pgrep -P ${PID} | xargs -I ARG kill -9 ARG - kill ${PID} - rm -f ${PID_FILE} -fi - -echo "Stopping iconservice..." -iconservice stop -c ./conf/testnet/iconservice_conf.json - -echo "Stopping iconrpcserver..." -iconrpcserver stop -p 9100 -c ./conf/testnet/iconrpcserver_conf.json -pkill -f gunicorn From 78b49384e9235ffa500f46dd6dba79ebb5bb003c Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Tue, 22 Jan 2019 21:24:41 +0900 Subject: [PATCH 07/23] [LC-126] Running without RS --- loopchain/baseservice/peer_manager.py | 9 +++- loopchain/channel/channel_service.py | 63 +++++++++++++---------- loopchain/channel/channel_statemachine.py | 5 ++ loopchain/configure_default.py | 1 + loopchain/peer/peer_service.py | 62 +++++++++++++--------- 5 files changed, 86 insertions(+), 54 deletions(-) diff --git a/loopchain/baseservice/peer_manager.py b/loopchain/baseservice/peer_manager.py index 41aa0054b..05b427d7f 100644 --- a/loopchain/baseservice/peer_manager.py +++ b/loopchain/baseservice/peer_manager.py @@ -17,6 +17,7 @@ import pickle import threading import math +from typing import Union import loopchain.utils as util from loopchain import configure as conf @@ -181,12 +182,16 @@ def remove_peer_by_target(self, channel_manager, peer_target): channel_manager.remove_audience( channel=self.__channel_name, peer_target=peer_target) - def add_peer(self, peer_info): + def add_peer(self, peer_info: Union[PeerInfo, dict]): """add_peer to peer_manager - :param peer_info: PeerInfo + :param peer_info: PeerInfo, dict :return: create_peer_order """ + + if isinstance(peer_info, dict): + peer_info = PeerInfo(peer_info["id"], peer_info["id"], peer_info["peer_target"], order=peer_info["order"]) + logging.debug(f"add peer id: {peer_info.peer_id}") # If exist same peer_target in peer_list, delete exist one. 
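Note on the `add_peer` change above: it now accepts either a `PeerInfo` or a plain dict; the dict form is what `__load_peers_from_file` in `channel_service.py` (the next file in this patch) passes through when peers are loaded from the channel management file instead of the radio station. A minimal sketch of both call forms, with the dict keys taken from this hunk and the concrete values invented for illustration:

```python
# Sketch only: the dict keys ("id", "peer_target", "order") come from this patch;
# the example values are made up for illustration.
from loopchain import configure as conf
from loopchain.baseservice import PeerManager, PeerInfo

peer_manager = PeerManager(conf.LOOPCHAIN_DEFAULT_CHANNEL)

# New dict form: converted internally, with "id" reused as both peer_id and group_id.
peer_manager.add_peer({"id": "peerid-1", "peer_target": "127.0.0.1:7100", "order": 1})

# Existing PeerInfo form keeps working unchanged.
peer_manager.add_peer(PeerInfo("peerid-2", "groupid-2", "127.0.0.1:7200"))
```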
diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 5cf5f7729..cff18d649 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -196,16 +196,15 @@ async def init(self, peer_port, peer_target, rest_target, radio_station_target, ChannelProperty().node_type = conf.NodeType(node_type) ChannelProperty().score_package = score_package + self.__peer_manager = PeerManager(ChannelProperty().name) self.__init_peer_auth() - self.__init_block_manager() self.__init_broadcast_scheduler() + self.__init_block_manager() self.__init_radio_station_stub() await self.__init_score_container() await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS, conf.AMQP_RETRY_DELAY, exclusive=True) - self.__peer_manager = PeerManager(ChannelProperty().name) - if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft: util.logger.spam(f"init consensus !") # load consensus @@ -216,7 +215,10 @@ async def init(self, peer_port, peer_target, rest_target, radio_station_target, self.__init_acceptor(peer_id=peer_id) if self.is_support_node_function(conf.NodeFunction.Vote): - self.connect_to_radio_station() + if conf.ENABLE_REP_RADIO_STATION: + self.connect_to_radio_station() + else: + await self.__load_peers_from_file() else: self.__init_node_subscriber() @@ -229,13 +231,17 @@ async def evaluate_network(self): async def subscribe_network(self): # Subscribe to radiostation and block_sync_target_stub - await self.subscribe_to_radio_station() + if self.is_support_node_function(conf.NodeFunction.Vote): + if conf.ENABLE_REP_RADIO_STATION: + await self.subscribe_to_radio_station() - if self.is_support_node_function(conf.NodeFunction.Vote) and self.block_manager.peer_type == loopchain_pb2.PEER: - await self.__subscribe_call_to_stub( - peer_stub=self.block_manager.subscribe_target_peer_stub, - peer_type=loopchain_pb2.PEER - ) + if self.block_manager.peer_type == loopchain_pb2.PEER: + await self.__subscribe_call_to_stub( + peer_stub=self.block_manager.subscribe_target_peer_stub, + peer_type=loopchain_pb2.PEER + ) + else: + await self.subscribe_to_radio_station() self.generate_genesis_block() @@ -307,11 +313,12 @@ def __init_broadcast_scheduler(self): def __init_radio_station_stub(self): if self.is_support_node_function(conf.NodeFunction.Vote): - self.__radio_station_stub = StubManager.get_stub_manager_to_server( - ChannelProperty().radio_station_target, - loopchain_pb2_grpc.RadioStationStub, - conf.CONNECTION_RETRY_TIMEOUT_TO_RS, - ssl_auth_type=conf.GRPC_SSL_TYPE) + if conf.ENABLE_REP_RADIO_STATION: + self.__radio_station_stub = StubManager.get_stub_manager_to_server( + ChannelProperty().radio_station_target, + loopchain_pb2_grpc.RadioStationStub, + conf.CONNECTION_RETRY_TIMEOUT_TO_RS, + ssl_auth_type=conf.GRPC_SSL_TYPE) else: self.__radio_station_stub = RestStubManager(ChannelProperty().radio_station_target, ChannelProperty().name) @@ -389,6 +396,13 @@ async def __load_score(self): return score_info + async def __load_peers_from_file(self): + channel_info = await StubCollection().peer_stub.async_task().get_channel_infos() + for peer_info in channel_info[ChannelProperty().name]["peers"]: + self.__peer_manager.add_peer(peer_info) + self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE, peer_info["peer_target"]) + self.show_peers() + def is_support_node_function(self, node_function): return conf.NodeType.is_support_node_function(node_function, ChannelProperty().node_type) @@ -662,24 +676,19 @@ async def reset_leader(self, new_leader_id, 
block_height=0): peer_type = loopchain_pb2.PEER if self_peer_object.target == peer_leader.target: - loggers.get_preset().is_leader = True - loggers.get_preset().update_logger() - logging.debug("Set Peer Type Leader!") peer_type = loopchain_pb2.BLOCK_GENERATOR self.state_machine.turn_to_leader() if conf.CONSENSUS_ALGORITHM != conf.ConsensusAlgorithm.lft: - self.peer_manager.announce_new_leader( - self.peer_manager.get_leader_peer().peer_id, - new_leader_id, - is_broadcast=True, - self_peer_id=ChannelProperty().peer_id - ) + if conf.ENABLE_REP_RADIO_STATION: + self.peer_manager.announce_new_leader( + self.peer_manager.get_leader_peer().peer_id, + new_leader_id, + is_broadcast=True, + self_peer_id=ChannelProperty().peer_id + ) else: - loggers.get_preset().is_leader = False - loggers.get_preset().update_logger() - logging.debug("Set Peer Type Peer!") self.state_machine.turn_to_peer() diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index 1674b031e..01395d396 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -18,6 +18,7 @@ import loopchain.utils as util from loopchain import configure as conf +from loopchain.utils import loggers from loopchain.peer import status_code from loopchain.protos import loopchain_pb2 from loopchain.statemachine import statemachine @@ -134,12 +135,16 @@ def _do_vote(self): self.__channel_service.block_manager.vote_as_peer() def _vote_on_enter(self): + loggers.get_preset().is_leader = False + loggers.get_preset().update_logger() util.logger.spam(f"\nvote_on_enter") def _vote_on_exit(self): util.logger.spam(f"\nvote_on_exit") def _blockgenerate_on_enter(self): + loggers.get_preset().is_leader = True + loggers.get_preset().update_logger() self.__channel_service.block_manager.start_block_generate_timer() def _blockgenerate_on_exit(self): diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index ad44bd8ef..9a78b94e9 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -332,6 +332,7 @@ def is_support_node_function(cls, node_function, node_type): LOOPCHAIN_TEST_CHANNEL = "loopchain_test" CHANNEL_MANAGE_DATA_PATH = os.path.join(LOOPCHAIN_ROOT_PATH, 'channel_manage_data.json') # Channel Manage Data Path ENABLE_CHANNEL_AUTH = False # if this option is true, peer only gets channel infos to which it belongs. 
+ENABLE_REP_RADIO_STATION = True CHANNEL_RESTART_TIMEOUT = 120 CHANNEL_BUILTIN = True diff --git a/loopchain/peer/peer_service.py b/loopchain/peer/peer_service.py index 7ef9ca443..44d8056e6 100644 --- a/loopchain/peer/peer_service.py +++ b/loopchain/peer/peer_service.py @@ -139,17 +139,16 @@ def radio_station_target(self): @property def stub_to_radiostation(self): - stub_type = loopchain_pb2_grpc.PeerServiceStub - if self.is_support_node_function(conf.NodeFunction.Vote): - stub_type = loopchain_pb2_grpc.RadioStationStub - if self.__radio_station_stub is None: if self.is_support_node_function(conf.NodeFunction.Vote): - self.__radio_station_stub = StubManager.get_stub_manager_to_server( - self.__radio_station_target, - stub_type, - conf.CONNECTION_RETRY_TIMEOUT_TO_RS, - ssl_auth_type=conf.GRPC_SSL_TYPE) + if conf.ENABLE_REP_RADIO_STATION: + self.__radio_station_stub = StubManager.get_stub_manager_to_server( + self.__radio_station_target, + loopchain_pb2_grpc.RadioStationStub, + conf.CONNECTION_RETRY_TIMEOUT_TO_RS, + ssl_auth_type=conf.GRPC_SSL_TYPE) + else: + self.__radio_station_stub = None else: self.__radio_station_stub = RestStubManager(self.__radio_station_target) @@ -179,22 +178,35 @@ def service_stop(self): def __get_channel_infos(self): # util.logger.spam(f"__get_channel_infos:node_type::{self.__node_type}") if self.is_support_node_function(conf.NodeFunction.Vote): - response = self.stub_to_radiostation.call_in_times( - method_name="GetChannelInfos", - message=loopchain_pb2.GetChannelInfosRequest( - peer_id=self.__peer_id, - peer_target=self.__peer_target, - group_id=self.group_id), - retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS, - is_stub_reuse=False, - timeout=conf.CONNECTION_TIMEOUT_TO_RS - ) - # util.logger.spam(f"__get_channel_infos:response::{response}") - - if not response: - return None - logging.info(f"Connect to channels({util.pretty_json(response.channel_infos)})") - channels = json.loads(response.channel_infos) + if conf.ENABLE_REP_RADIO_STATION: + response = self.stub_to_radiostation.call_in_times( + method_name="GetChannelInfos", + message=loopchain_pb2.GetChannelInfosRequest( + peer_id=self.__peer_id, + peer_target=self.__peer_target, + group_id=self.group_id), + retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS, + is_stub_reuse=False, + timeout=conf.CONNECTION_TIMEOUT_TO_RS + ) + # util.logger.spam(f"__get_channel_infos:response::{response}") + + if not response: + return None + logging.info(f"Connect to channels({util.pretty_json(response.channel_infos)})") + channels = json.loads(response.channel_infos) + else: + logging.debug(f"try to load channel management data from json file ({conf.CHANNEL_MANAGE_DATA_PATH})") + try: + with open(conf.CHANNEL_MANAGE_DATA_PATH) as file: + json_data = json.load(file) + json_string = json.dumps(json_data).replace('[local_ip]', util.get_private_ip()) + channels = json.loads(json_string) + + logging.info(f"loading channel info : {json_data}") + except FileNotFoundError as e: + util.exit_and_msg(f"cannot open json file in ({conf.CHANNEL_MANAGE_DATA_PATH}): {e}") + raise # To make linter happy. else: response = self.stub_to_radiostation.call_in_times(method_name="GetChannelInfos") channels = {channel: value for channel, value in response["channel_infos"].items()} From b9fa811823d8e4f5393c20ebc4b0f3ab0758c390 Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Wed, 23 Jan 2019 15:16:55 +0900 Subject: [PATCH 08/23] [LC-126] Fix broken tests. 
--- .../test_certificate_authorization.py | 5 +--- testcase/unittest/test_transaction.py | 1 - testcase/unittest/test_vote.py | 24 ++++++------------- 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/testcase/unittest/test_certificate_authorization.py b/testcase/unittest/test_certificate_authorization.py index c811cedc9..dc5570231 100644 --- a/testcase/unittest/test_certificate_authorization.py +++ b/testcase/unittest/test_certificate_authorization.py @@ -26,11 +26,8 @@ from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa, padding, ec -import loopchain.utils as util import testcase.unittest.test_util as test_util -from loopchain.configure_default import KeyLoadType -from loopchain.peer import PeerAuthorization -from loopchain.radiostation import CertificateAuthorization, RadioStationService +from loopchain.radiostation import CertificateAuthorization from loopchain import configure as conf from loopchain.utils import loggers diff --git a/testcase/unittest/test_transaction.py b/testcase/unittest/test_transaction.py index 10eb65ad2..589b20ee7 100644 --- a/testcase/unittest/test_transaction.py +++ b/testcase/unittest/test_transaction.py @@ -27,7 +27,6 @@ from cryptography.hazmat.backends import default_backend import testcase.unittest.test_util as test_util -from loopchain.peer import PeerAuthorization from loopchain import configure as conf from loopchain.utils import loggers diff --git a/testcase/unittest/test_vote.py b/testcase/unittest/test_vote.py index fa1c0611a..f9ca9e89f 100644 --- a/testcase/unittest/test_vote.py +++ b/testcase/unittest/test_vote.py @@ -23,7 +23,6 @@ from loopchain.baseservice import PeerManager, PeerInfo from loopchain.blockchain import Vote from loopchain.protos import loopchain_pb2 -from loopchain.tools.signature_helper import PublicVerifier from loopchain.utils import loggers loggers.set_preset_type(loggers.PresetType.develop) @@ -31,15 +30,8 @@ class TestVote(unittest.TestCase): - - __cert = None - def setUp(self): test_util.print_testname(self._testMethodName) - if self.__cert is None: - with open(conf.CHANNEL_OPTION[list(conf.CHANNEL_OPTION)[0]][PublicVerifier.PUBLIC_PATH], "rb") as der: - cert_byte = der.read() - self.__cert = cert_byte def tearDown(self): pass @@ -50,7 +42,6 @@ def __make_peer_info(self, peer_id, group_id): peer_info.peer_type = loopchain_pb2.PEER peer_info.peer_id = peer_id peer_info.group_id = group_id - peer_info.cert = self.__cert return peer_info def test_vote_init_from_audience(self): @@ -81,8 +72,7 @@ def test_vote_init_from_peer_list(self): def __add_peer_to_peer_manager(self, peer_manager: PeerManager, number_of_peer): for i in range(1, number_of_peer + 1): number = str(i) - peer_data = PeerInfo("peerid-" + number, "groupid-" + number, "peerid-" + number + "_target", - cert=self.__cert) + peer_data = PeerInfo("peerid-" + number, "groupid-" + number, "peerid-" + number + "_target") peer_manager.add_peer(peer_data) def test_vote_init_from_different_source(self): @@ -105,8 +95,8 @@ def test_add_vote(self): # GIVEN peer_manager = PeerManager(conf.LOOPCHAIN_DEFAULT_CHANNEL) self.__add_peer_to_peer_manager(peer_manager, 3) - peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target", cert=self.__cert)) - peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", "peerid-5_target", cert=self.__cert)) + peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target")) + peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", 
"peerid-5_target")) vote = Vote("block_hash", peer_manager) logging.debug("votes: " + str(vote.votes)) @@ -123,8 +113,8 @@ def test_add_vote_fail_before_add_peer(self): # GIVEN peer_manager = PeerManager(conf.LOOPCHAIN_DEFAULT_CHANNEL) self.__add_peer_to_peer_manager(peer_manager, 3) - peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target", cert=self.__cert)) - peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", "peerid-5_target", cert=self.__cert)) + peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target")) + peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", "peerid-5_target")) vote = Vote("block_hash", peer_manager) logging.debug("votes: " + str(vote.votes)) @@ -146,8 +136,8 @@ def test_fail_vote(self): # GIVEN peer_manager = PeerManager(conf.LOOPCHAIN_DEFAULT_CHANNEL) self.__add_peer_to_peer_manager(peer_manager, 3) - peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target", cert=self.__cert)) - peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", "peerid-5_target", cert=self.__cert)) + peer_manager.add_peer(PeerInfo("peerid-4", "groupid-3", "peerid-4_target")) + peer_manager.add_peer(PeerInfo("peerid-5", "groupid-3", "peerid-5_target")) vote = Vote("block_hash", peer_manager) logging.debug("votes: " + str(vote.votes)) From a9d99261ba9f6c576fed5c21a9eb9ead54d6f0ec Mon Sep 17 00:00:00 2001 From: Jiyun Park Date: Wed, 23 Jan 2019 18:41:53 +0900 Subject: [PATCH 09/23] [LC-102] change log level of iconservice and iconrpcserver to warning --- conf/develop/iconrpcserver_conf.json | 2 +- conf/develop/iconservice_conf.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/develop/iconrpcserver_conf.json b/conf/develop/iconrpcserver_conf.json index 13aa5b441..2f6cc760a 100644 --- a/conf/develop/iconrpcserver_conf.json +++ b/conf/develop/iconrpcserver_conf.json @@ -2,7 +2,7 @@ "log": { "logger": "iconrpcserver", "colorLog": true, - "level": "info", + "level": "warning", "filePath": "./log/develop/iconrpcserver.log", "outputType": "console|file", "rotate": { diff --git a/conf/develop/iconservice_conf.json b/conf/develop/iconservice_conf.json index c62eb570e..22f61be5c 100644 --- a/conf/develop/iconservice_conf.json +++ b/conf/develop/iconservice_conf.json @@ -2,7 +2,7 @@ "log": { "logger": "iconservice", "colorLog": true, - "level": "info", + "level": "warning", "filePath": "./log/develop/iconservice.log", "outputType": "console|file", "rotate": { From 1491e3ee48824c6a068e53231326eb330e0b5bc5 Mon Sep 17 00:00:00 2001 From: winDy Date: Fri, 18 Jan 2019 13:08:45 +0900 Subject: [PATCH 10/23] [LC-122] remove announce_new_leader in reset_leader of channel_service, add add_timer_convenient for code convenient. 
--- loopchain/baseservice/timer_service.py | 17 +++++++++++++++-- loopchain/channel/channel_inner_service.py | 4 ++-- loopchain/peer/block_manager.py | 3 ++- .../utils/loggers/configuration_presets.py | 3 +-- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/loopchain/baseservice/timer_service.py b/loopchain/baseservice/timer_service.py index 207a4730a..5e9364d90 100644 --- a/loopchain/baseservice/timer_service.py +++ b/loopchain/baseservice/timer_service.py @@ -45,7 +45,7 @@ def __init__(self, **kwargs): self.__is_repeat = kwargs.get("is_repeat", False) self.is_run_at_start = kwargs.get("is_run_at_start", False) self.__callback = kwargs.get("callback", None) - self.__kwargs = kwargs.get("callback_kwargs", {}) + self.__kwargs = kwargs.get("callback_kwargs") or {} @property def target(self): @@ -129,6 +129,19 @@ def add_timer(self, key, timer): if timer.is_run_at_start: self.restart_timer(key) + def add_timer_convenient(self, timer_key, duration, is_repeat=False, callback=None, callback_kwargs=None): + if timer_key not in self.__timer_list: + self.add_timer( + timer_key, + Timer( + target=timer_key, + duration=duration, + is_repeat=is_repeat, + callback=callback, + callback_kwargs=callback_kwargs + ) + ) + def remove_timer(self, key): """remove timer from self.__timer_list @@ -192,7 +205,7 @@ def stop_timer(self, key, off_type=OffType.normal): logging.debug(f"TIMER IS STOP ({key})") util.logger.spam(f"remain timers after stop_timer: {self.__timer_list.keys()}") else: - logging.warning(f'stop_timer:There is no value by this key: {key}') + logging.debug(f'stop_timer:There is no value by this key: {key}') def stop(self): super().stop() diff --git a/loopchain/channel/channel_inner_service.py b/loopchain/channel/channel_inner_service.py index 84034ddbb..6d99f8ffd 100644 --- a/loopchain/channel/channel_inner_service.py +++ b/loopchain/channel/channel_inner_service.py @@ -123,7 +123,7 @@ async def get_status(self): status_data["status"] = block_manager.service_status status_data["state"] = self._channel_service.state_machine.state - status_data["peer_type"] = str(block_manager.peer_type) + status_data["peer_type"] = str(1 if self._channel_service.state_machine.state == "BlockGenerate" else 0) status_data["audience_count"] = "0" status_data["consensus"] = str(conf.CONSENSUS_ALGORITHM.name) status_data["peer_id"] = str(ChannelProperty().peer_id) @@ -318,7 +318,7 @@ async def announce_unconfirmed_block(self, block_pickled) -> None: f"\nnext_leader_peer({unconfirmed_block.header.next_leader.hex()}, " f"channel({ChannelProperty().name}))") - if ChannelProperty().peer_id == unconfirmed_block.header.next_leader.hex_hx(): + if self._channel_service.peer_manager.get_leader_id(conf.ALL_GROUP_ID) != unconfirmed_block.header.next_leader.hex_hx(): await self._channel_service.reset_leader(unconfirmed_block.header.next_leader.hex_hx()) @message_queue_task diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index 8c1a1813d..b63679fc3 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -80,7 +80,8 @@ def channel_name(self): def service_status(self): # Return string for compatibility. 
if self.__service_status >= 0: - return "Service is online: " + str(self.peer_type) + return "Service is online: " + \ + str(1 if self.__channel_service.state_machine.state == "BlockGenerate" else 0) else: return "Service is offline: " + status_code.get_status_reason(self.__service_status) diff --git a/loopchain/utils/loggers/configuration_presets.py b/loopchain/utils/loggers/configuration_presets.py index 8b384d8c2..04ba60dec 100644 --- a/loopchain/utils/loggers/configuration_presets.py +++ b/loopchain/utils/loggers/configuration_presets.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import verboselogs from enum import Enum @@ -73,7 +72,7 @@ def update_preset(update_logger=True): preset.log_monitor_port = conf.MONITOR_LOG_PORT if preset is develop: - preset.log_level = verboselogs.SPAM + preset.log_level = verboselogs.NOTICE else: preset.log_level = conf.LOOPCHAIN_LOG_LEVEL From e9cab06ae602bd3506ba47a2f0706f4851d5d31f Mon Sep 17 00:00:00 2001 From: winDy Date: Fri, 18 Jan 2019 13:08:45 +0900 Subject: [PATCH 11/23] [LC-122] remove announce_new_leader in reset_leader of channel_service, add add_timer_convenient for code convenient. --- loopchain/peer/block_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index b63679fc3..1bbdcf934 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -522,7 +522,7 @@ def __block_height_sync(self, target_peer_stub=None, target_height=None): break finally: if result: - my_height = block.header.height + my_height += 1 retry_number = 0 else: retry_number += 1 From b63348729b83ec8ce46e1d6b6cd66f871c4ef3c7 Mon Sep 17 00:00:00 2001 From: winDy Date: Tue, 22 Jan 2019 01:44:29 +0900 Subject: [PATCH 12/23] Preventing the height difference between unconfirmed block and last block by less than 1 when block height sync. 
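The rule being enforced: an unconfirmed block only counts toward the node's current height when it sits exactly one above the last committed block, and an unconfirmed block any further ahead is ignored while syncing, so the gap between the two can never exceed one. A hedged sketch of the check, assuming the same attribute names as `__current_block_height` in `block_manager.py`:

```python
# Restatement of the new __current_block_height() logic introduced in this patch.
def current_block_height(blockchain):
    unconfirmed = blockchain.last_unconfirmed_block
    if unconfirmed and unconfirmed.header.height == blockchain.block_height + 1:
        return blockchain.block_height + 1  # unconfirmed block is exactly one ahead
    return blockchain.block_height          # otherwise fall back to the committed height
```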
--- loopchain/peer/block_manager.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index 1bbdcf934..7968e1418 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -423,13 +423,11 @@ def stop_block_generate_timer(self): self.__consensus_algorithm.stop() def __current_block_height(self): - unconfirmed_block_height = -1 - if self.__blockchain.last_unconfirmed_block: - unconfirmed_block_height = self.__blockchain.last_unconfirmed_block.header.height - return max(unconfirmed_block_height, self.__blockchain.block_height) - - def __next_block_height(self): - return self.__current_block_height() + 1 + if self.__blockchain.last_unconfirmed_block and \ + self.__blockchain.last_unconfirmed_block.header.height == self.__blockchain.block_height + 1: + return self.__blockchain.block_height + 1 + else: + return self.__blockchain.block_height def __add_block_by_sync(self, block_): commit_state = block_.header.commit_state @@ -478,8 +476,7 @@ def __block_height_sync(self, target_peer_stub=None, target_height=None): logging.info(f"In block height sync max: {max_height} yours: {my_height}") - next_block_height = my_height + 1 - self.get_blockchain().prevent_next_block_mismatch(next_block_height) + self.get_blockchain().prevent_next_block_mismatch(self.__blockchain.block_height) try: while max_height > my_height: From 8a0927ac0ab40dd9c35d78cc49f2c9f6ef7bd61d Mon Sep 17 00:00:00 2001 From: winDy Date: Tue, 22 Jan 2019 16:53:13 +0900 Subject: [PATCH 13/23] Add Leader Complain Timer --- docs/8. diagram/state_machine_in_siever.wsd | 10 ++++-- loopchain/baseservice/broadcast_scheduler.py | 1 + loopchain/baseservice/timer_service.py | 1 + loopchain/blockchain/blockchain.py | 3 ++ loopchain/channel/channel_service.py | 12 +++++++ loopchain/channel/channel_statemachine.py | 38 ++++++++++++-------- loopchain/configure_default.py | 1 + loopchain/peer/block_manager.py | 2 +- 8 files changed, 50 insertions(+), 18 deletions(-) diff --git a/docs/8. diagram/state_machine_in_siever.wsd b/docs/8. diagram/state_machine_in_siever.wsd index cc7122cbe..32cca6e7d 100644 --- a/docs/8. diagram/state_machine_in_siever.wsd +++ b/docs/8. 
diagram/state_machine_in_siever.wsd @@ -47,10 +47,8 @@ state Consensus { Watch: Wait for new block announcement (subscribe_loop) Watch --> BlockHeightSync: Reconnect to RS Peer\n(after connection error) Vote: on_enter { - Vote: \tAddTimer: leader complain Vote: } Vote: on_exit { - Vote: \tStopTimer: leader complain Vote: } Vote: Vote: trigger::vote\n(Recv AnnounceUnConfirmedBlock (block.height == mine + 1)) @@ -69,12 +67,18 @@ state Consensus { BlockGenerate -> Vote : Send AnnounceConfirmedBlock\ntrigger::turn_to_peer\n(block_type == vote) state LeaderComplain - note right of LeaderComplain + note top of LeaderComplain keep leader complain Timer until new leader elected + leader complain timer start by AddTx(List), and stop by AddBlock end note LeaderComplain -> Vote : Recv AnnounceNewLeader\n(if next leader != self) LeaderComplain --> BlockGenerate : Send AnnounceNewLeader\n(if next leader == self) + LeaderComplain: on_enter { + LeaderComplain: } + LeaderComplain: on_exit { + LeaderComplain: } + LeaderComplain: } Consensus --> GracefulShutdown diff --git a/loopchain/baseservice/broadcast_scheduler.py b/loopchain/baseservice/broadcast_scheduler.py index 9a720e2fc..6a44f44c2 100644 --- a/loopchain/baseservice/broadcast_scheduler.py +++ b/loopchain/baseservice/broadcast_scheduler.py @@ -316,6 +316,7 @@ def __send_tx_by_timer(self, **kwargs): # Send multiple tx remains, message = self.__make_tx_list_message() self.__broadcast_run("AddTxList", message) + ObjectManager().channel_service.start_leader_complain_timer() if remains: self.__send_tx_in_timer() diff --git a/loopchain/baseservice/timer_service.py b/loopchain/baseservice/timer_service.py index 5e9364d90..c779b769c 100644 --- a/loopchain/baseservice/timer_service.py +++ b/loopchain/baseservice/timer_service.py @@ -103,6 +103,7 @@ class TimerService(CommonThread): TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE = "TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE" TIMER_KEY_BLOCK_GENERATE = "TIMER_KEY_BLOCK_GENERATE" TIMER_KEY_BROADCAST_SEND_UNCONFIRMED_BLOCK = "TIMER_KEY_BROADCAST_SEND_UNCONFIRMED_BLOCK" + TIMER_KEY_LEADER_COMPLAIN = "TIMER_KEY_LEADER_COMPLAIN" def __init__(self): CommonThread.__init__(self) diff --git a/loopchain/blockchain/blockchain.py b/loopchain/blockchain/blockchain.py index 88f263d1c..21d2a1041 100644 --- a/loopchain/blockchain/blockchain.py +++ b/loopchain/blockchain/blockchain.py @@ -261,6 +261,9 @@ def __add_block(self, block: Block, vote: Vote = None): 'block_height': self.__block_height }}) + # stop leader complain timer + ObjectManager().channel_service.stop_leader_complain_timer() + # notify new block ObjectManager().channel_service.inner_service.notify_new_block() diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 63df0f0fb..334a01e31 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -834,8 +834,20 @@ def get_object_has_queue_by_consensus(self): else: object_has_queue = self.__block_manager + self.start_leader_complain_timer() + return object_has_queue + def start_leader_complain_timer(self): + util.logger.notice(f"start_leader_complain_timer in channel service.") + self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_LEADER_COMPLAIN, + duration=conf.TIMEOUT_FOR_LEADER_COMPLAIN, + is_repeat=True, callback=self.state_machine.leader_complain) + + def stop_leader_complain_timer(self): + util.logger.notice(f"stop_leader_complain_timer in channel service.") + 
self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN) + def start_subscribe_timer(self): timer_key = TimerService.TIMER_KEY_SUBSCRIBE if timer_key not in self.__timer_service.timer_list: diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index 01395d396..4651fdf6f 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -13,12 +13,12 @@ # limitations under the License. """State Machine for Channel Service""" import asyncio + from earlgrey import MessageQueueService from transitions import State import loopchain.utils as util from loopchain import configure as conf -from loopchain.utils import loggers from loopchain.peer import status_code from loopchain.protos import loopchain_pb2 from loopchain.statemachine import statemachine @@ -27,16 +27,19 @@ @statemachine.StateMachine("Channel State Machine") class ChannelStateMachine(object): states = ['InitComponents', - State(name='Consensus', on_enter='_consensus_on_enter'), - State(name='BlockHeightSync', on_enter='_blockheightsync_on_enter'), + State(name='Consensus', ignore_invalid_triggers=True, on_enter='_consensus_on_enter'), + State(name='BlockHeightSync', ignore_invalid_triggers=True, on_enter='_blockheightsync_on_enter'), 'EvaluateNetwork', - State(name='BlockSync', on_enter='_blocksync_on_enter', on_exit='_blocksync_on_exit'), - State(name='SubscribeNetwork', on_enter='_subscribe_network_on_enter', + State(name='BlockSync', ignore_invalid_triggers=True, + on_enter='_blocksync_on_enter', on_exit='_blocksync_on_exit'), + State(name='SubscribeNetwork', ignore_invalid_triggers=True, on_enter='_subscribe_network_on_enter', on_exit='_subscribe_network_on_exit'), 'Watch', - State(name='Vote', on_enter='_vote_on_enter', on_exit='_vote_on_exit'), - State(name='BlockGenerate', on_enter='_blockgenerate_on_enter', on_exit='_blockgenerate_on_exit'), - 'LeaderComplain', + State(name='Vote', ignore_invalid_triggers=True, on_enter='_vote_on_enter', on_exit='_vote_on_exit'), + State(name='BlockGenerate', ignore_invalid_triggers=True, + on_enter='_blockgenerate_on_enter', on_exit='_blockgenerate_on_exit'), + State(name='LeaderComplain', ignore_invalid_triggers=True, + on_enter='_leadercomplain_on_enter', on_exit='_leadercomplain_on_exit'), 'GracefulShutdown'] init_state = 'InitComponents' state = init_state @@ -89,6 +92,10 @@ def turn_to_peer(self): def turn_to_leader(self): pass + @statemachine.transition(source='Vote', dest='LeaderComplain') + def leader_complain(self): + pass + def _is_leader(self): return self.__channel_service.block_manager.peer_type == loopchain_pb2.BLOCK_GENERATOR @@ -135,17 +142,20 @@ def _do_vote(self): self.__channel_service.block_manager.vote_as_peer() def _vote_on_enter(self): - loggers.get_preset().is_leader = False - loggers.get_preset().update_logger() - util.logger.spam(f"\nvote_on_enter") + pass def _vote_on_exit(self): - util.logger.spam(f"\nvote_on_exit") + # util.logger.notice(f"_vote_on_exit") + pass def _blockgenerate_on_enter(self): - loggers.get_preset().is_leader = True - loggers.get_preset().update_logger() self.__channel_service.block_manager.start_block_generate_timer() def _blockgenerate_on_exit(self): self.__channel_service.block_manager.stop_block_generate_timer() + + def _leadercomplain_on_enter(self): + util.logger.notice(f"_leadercomplain_on_enter") + + def _leadercomplain_on_exit(self): + util.logger.notice(f"_leadercomplain_on_exit") diff --git a/loopchain/configure_default.py 
b/loopchain/configure_default.py index f7e8a3f2f..cc5df33a8 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -443,3 +443,4 @@ def is_support_node_function(cls, node_function, node_type): CONF_PATH_ICONRPCSERVER_DEV = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/develop/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_TESTNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/testnet/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_MAINNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/mainnet/iconrpcserver_conf.json') +TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 30 diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index 7968e1418..da96d3110 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -19,7 +19,7 @@ from jsonrpcclient.exceptions import ReceivedErrorResponse -from loopchain.baseservice import BroadcastCommand, TimerService +from loopchain.baseservice import TimerService from loopchain.consensus import * from loopchain.peer import status_code from loopchain.peer.consensus_siever import ConsensusSiever From 07508f4688165c58abddad19b16af6a3d50f8a2e Mon Sep 17 00:00:00 2001 From: Daehee Kim Date: Tue, 22 Jan 2019 20:52:33 +0900 Subject: [PATCH 14/23] Add is_complain "0.2" block header --- loopchain/blockchain/blocks/block_builder.py | 15 ++++++++- .../blockchain/blocks/block_serializer.py | 14 +++++++- .../blockchain/blocks/v0_1a/block_builder.py | 32 +++++++++++------- .../blocks/v0_1a/block_serializer.py | 33 ++++++++++--------- loopchain/blockchain/blocks/v0_2/block.py | 2 ++ .../blockchain/blocks/v0_2/block_builder.py | 17 ++++++++++ .../blocks/v0_2/block_serializer.py | 6 ++++ 7 files changed, 90 insertions(+), 29 deletions(-) diff --git a/loopchain/blockchain/blocks/block_builder.py b/loopchain/blockchain/blocks/block_builder.py index 8b8a9fee8..9fb278a88 100644 --- a/loopchain/blockchain/blocks/block_builder.py +++ b/loopchain/blockchain/blocks/block_builder.py @@ -1,5 +1,5 @@ import hashlib -from abc import ABC +from abc import ABC, abstractmethod from collections import OrderedDict from secp256k1 import PrivateKey from typing import Dict @@ -42,6 +42,19 @@ def reset_cache(self): def build(self) -> 'Block': raise NotImplementedError + def build_block(self): + header = self.BlockHeaderClass(**self.build_block_header_data()) + body = self.BlockBodyClass(**self.build_block_body_data()) + return Block(header, body) + + @abstractmethod + def build_block_header_data(self) -> dict: + raise NotImplementedError + + @abstractmethod + def build_block_body_data(self) -> dict: + raise NotImplementedError + def build_hash(self): if self.prev_hash is None: raise RuntimeError diff --git a/loopchain/blockchain/blocks/block_serializer.py b/loopchain/blockchain/blocks/block_serializer.py index abaf4209e..a5ecf5f60 100644 --- a/loopchain/blockchain/blocks/block_serializer.py +++ b/loopchain/blockchain/blocks/block_serializer.py @@ -32,8 +32,20 @@ def deserialize(self, block_dumped: dict) -> 'Block': "The block of this version cannot be deserialized by the serializer.") return self._deserialize(block_dumped) + def _deserialize(self, json_data): + header_data = self._deserialize_header_data(json_data) + header = self.BlockHeaderClass(**header_data) + + body_data = self._deserialize_body_data(json_data) + body = self.BlockBodyClass(**body_data) + return Block(header, body) + + @abstractmethod + def _deserialize_header_data(self, json_data: dict): + raise NotImplementedError + @abstractmethod - def 
_deserialize(self, block_dumped: dict) -> 'Block': + def _deserialize_body_data(self, json_data: dict): raise NotImplementedError @classmethod diff --git a/loopchain/blockchain/blocks/v0_1a/block_builder.py b/loopchain/blockchain/blocks/v0_1a/block_builder.py index 859858f82..3b73ed2e7 100644 --- a/loopchain/blockchain/blocks/v0_1a/block_builder.py +++ b/loopchain/blockchain/blocks/v0_1a/block_builder.py @@ -45,20 +45,28 @@ def build(self): self.build_peer_id() self.sign() - header = self.BlockHeaderClass( - hash=self.hash, - prev_hash=self.prev_hash, - height=self.height, - timestamp=self._timestamp, - peer_id=self.peer_id, - signature=self.signature, - next_leader=self.next_leader, - merkle_tree_root_hash=self.merkle_tree_root_hash, - commit_state=self.commit_state) - body = self.BlockBodyClass(self.transactions, self.confirm_prev_block) - self.block = Block(header, body) + self.block = self.build_block() return self.block + def build_block_header_data(self): + return { + "hash": self.hash, + "prev_hash": self.prev_hash, + "height": self.height, + "timestamp": self._timestamp, + "peer_id": self.peer_id, + "signature": self.signature, + "next_leader": self.next_leader, + "merkle_tree_root_hash": self.merkle_tree_root_hash, + "commit_state": self.commit_state + } + + def build_block_body_data(self): + return { + "transactions": self.transactions, + "confirm_prev_block": self.confirm_prev_block + } + def build_merkle_tree_root_hash(self): if self.merkle_tree_root_hash is not None: return self.merkle_tree_root_hash diff --git a/loopchain/blockchain/blocks/v0_1a/block_serializer.py b/loopchain/blockchain/blocks/v0_1a/block_serializer.py index 97834e14d..97310e3fb 100644 --- a/loopchain/blockchain/blocks/v0_1a/block_serializer.py +++ b/loopchain/blockchain/blocks/v0_1a/block_serializer.py @@ -32,7 +32,7 @@ def _serialize(self, block: 'Block'): "commit_state": header.commit_state } - def _deserialize(self, json_data): + def _deserialize_header_data(self, json_data: dict): prev_hash = json_data.get('prev_block_hash') prev_hash = Hash32.fromhex(prev_hash, ignore_prefix=True) if prev_hash else None @@ -45,19 +45,20 @@ def _deserialize(self, json_data): next_leader = json_data.get("next_leader") next_leader = ExternalAddress.fromhex(next_leader) if next_leader else None - confirm_prev_block = json_data.get("confirm_prev_block") + return { + "hash": Hash32.fromhex(json_data["block_hash"], ignore_prefix=True), + "prev_hash": prev_hash, + "height": json_data["height"], + "timestamp": json_data["time_stamp"], + "peer_id": peer_id, + "signature": signature, + "next_leader": next_leader, + "merkle_tree_root_hash": Hash32.fromhex(json_data["merkle_tree_root_hash"], ignore_prefix=True), + "commit_state": json_data["commit_state"] + } - header = self.BlockHeaderClass( - hash=Hash32.fromhex(json_data["block_hash"], ignore_prefix=True), - prev_hash=prev_hash, - height=json_data["height"], - timestamp=json_data["time_stamp"], - peer_id=peer_id, - signature=signature, - next_leader=next_leader, - merkle_tree_root_hash=Hash32.fromhex(json_data["merkle_tree_root_hash"], ignore_prefix=True), - commit_state=json_data.get("commit_state") - ) + def _deserialize_body_data(self, json_data: dict): + confirm_prev_block = json_data.get("confirm_prev_block") transactions = OrderedDict() for tx_data in json_data['confirmed_transaction_list']: @@ -66,5 +67,7 @@ def _deserialize(self, json_data): tx = ts.from_(tx_data) transactions[tx.hash] = tx - body = self.BlockBodyClass(transactions, confirm_prev_block) - return 
Block(header, body) + return { + "confirm_prev_block": confirm_prev_block, + "transactions": transactions + } diff --git a/loopchain/blockchain/blocks/v0_2/block.py b/loopchain/blockchain/blocks/v0_2/block.py index bae954c4d..403c97020 100644 --- a/loopchain/blockchain/blocks/v0_2/block.py +++ b/loopchain/blockchain/blocks/v0_2/block.py @@ -4,6 +4,8 @@ @dataclass(frozen=True) class BlockHeader(v0_1a.BlockHeader): + is_complain: bool + version = "0.2" diff --git a/loopchain/blockchain/blocks/v0_2/block_builder.py b/loopchain/blockchain/blocks/v0_2/block_builder.py index e3836027c..add9eb88f 100644 --- a/loopchain/blockchain/blocks/v0_2/block_builder.py +++ b/loopchain/blockchain/blocks/v0_2/block_builder.py @@ -1,8 +1,25 @@ +from typing import TYPE_CHECKING from . import BlockHeader, BlockBody from .. import v0_1a +if TYPE_CHECKING: + from ... import TransactionVersioner + class BlockBuilder(v0_1a.BlockBuilder): version = BlockHeader.version BlockHeaderClass = BlockHeader BlockBodyClass = BlockBody + + def __init__(self, tx_versioner: 'TransactionVersioner'): + super().__init__(tx_versioner) + self.is_complain = False + + def reset_cache(self): + super().reset_cache() + self.is_complain = False + + def build_block_header_data(self): + header_data = super().build_block_header_data() + header_data["is_complain"] = self.is_complain + return header_data diff --git a/loopchain/blockchain/blocks/v0_2/block_serializer.py b/loopchain/blockchain/blocks/v0_2/block_serializer.py index 9979bfb08..56b2d19bc 100644 --- a/loopchain/blockchain/blocks/v0_2/block_serializer.py +++ b/loopchain/blockchain/blocks/v0_2/block_serializer.py @@ -11,4 +11,10 @@ def _serialize(self, block: 'Block'): header: BlockHeader = block.header block_serialized = super()._serialize(block) block_serialized["next_leader"] = header.next_leader.hex_xx() + block_serialized["is_complain"] = "0x1" if header.is_complain else "0x0" return block_serialized + + def _deserialize_header_data(self, json_data: dict): + header = super()._deserialize_header_data(json_data) + header["is_complain"] = True if json_data["is_complain"] == "0x1" else False + return header From 6328cf8586b68d340901ce1c18a5d95f0528c592 Mon Sep 17 00:00:00 2001 From: winDy Date: Tue, 22 Jan 2019 20:54:23 +0900 Subject: [PATCH 15/23] Add leader complain scenario --- docs/1. specification/leader_complain.md | 13 +++- docs/8. diagram/state_machine_in_siever.wsd | 1 + loopchain/channel/channel_inner_service.py | 15 ++-- loopchain/channel/channel_service.py | 77 ++++++--------------- loopchain/configure_default.py | 2 +- loopchain/peer/consensus_siever.py | 2 +- 6 files changed, 45 insertions(+), 65 deletions(-) diff --git a/docs/1. specification/leader_complain.md b/docs/1. specification/leader_complain.md index 5b1e36354..36c7c2c49 100644 --- a/docs/1. specification/leader_complain.md +++ b/docs/1. specification/leader_complain.md @@ -21,7 +21,18 @@ - If receive 'AnnounceUnconfirmedBlock' in state 'LeaderComplain' then Peer should not vote. * Increase block generate time interval - After each leader complain phase, next leader get more *2 time interval than prev leader. 
- + + +#### Complain Process (new) + * leader complain Timer + - start when "AddTx(List)" + - stop when "Add Block" + * Complain Block + - Set is_complain flag True in block header + - It made by peer (the order of priority: from prev_leader to next 1 by 1 except complain peer) + - No tx in block + - Confirm by next block (Normal Voting Block, is_complain=False) + - Include prev block votes #### Complain message ``` diff --git a/docs/8. diagram/state_machine_in_siever.wsd b/docs/8. diagram/state_machine_in_siever.wsd index 32cca6e7d..953540d5a 100644 --- a/docs/8. diagram/state_machine_in_siever.wsd +++ b/docs/8. diagram/state_machine_in_siever.wsd @@ -75,6 +75,7 @@ state Consensus { LeaderComplain -> Vote : Recv AnnounceNewLeader\n(if next leader != self) LeaderComplain --> BlockGenerate : Send AnnounceNewLeader\n(if next leader == self) LeaderComplain: on_enter { + LeaderComplain: \tbroadcast complain LeaderComplain: } LeaderComplain: on_exit { LeaderComplain: } diff --git a/loopchain/channel/channel_inner_service.py b/loopchain/channel/channel_inner_service.py index 6d99f8ffd..0521f57d5 100644 --- a/loopchain/channel/channel_inner_service.py +++ b/loopchain/channel/channel_inner_service.py @@ -107,19 +107,20 @@ async def reset_leader(self, new_leader, block_height=0) -> None: @message_queue_task(priority=255) async def get_status(self): - block_height = 0 - total_tx = 0 - status_data = dict() - block_manager = self._channel_service.block_manager status_data["made_block_count"] = block_manager.made_block_count + + block_height = 0 unconfirmed_block_height = None last_block = block_manager.get_blockchain().last_block + last_unconfirmed_block = block_manager.get_blockchain().last_unconfirmed_block + if last_block: block_height = last_block.header.height - total_tx = block_manager.get_total_tx() - logging.debug(f"last_block height({block_height}), hash({last_block.header.hash})") + + if last_unconfirmed_block: + unconfirmed_block_height = last_unconfirmed_block.header.height status_data["status"] = block_manager.service_status status_data["state"] = self._channel_service.state_machine.state @@ -129,7 +130,7 @@ async def get_status(self): status_data["peer_id"] = str(ChannelProperty().peer_id) status_data["block_height"] = block_height status_data["unconfirmed_block_height"] = unconfirmed_block_height or -1 - status_data["total_tx"] = total_tx + status_data["total_tx"] = block_manager.get_total_tx() status_data["unconfirmed_tx"] = block_manager.get_count_of_unconfirmed_tx() status_data["peer_target"] = ChannelProperty().peer_target status_data["leader_complaint"] = 1 diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 334a01e31..a54e060b1 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -436,16 +436,10 @@ def connect_to_radio_station(self, is_reconnect=False): timeout=conf.CONNECTION_TIMEOUT_TO_RS) # start next ConnectPeer timer - if TimerService.TIMER_KEY_CONNECT_PEER not in self.__timer_service.timer_list: - self.__timer_service.add_timer( - TimerService.TIMER_KEY_CONNECT_PEER, - Timer( - target=TimerService.TIMER_KEY_CONNECT_PEER, - duration=conf.CONNECTION_RETRY_TIMER, - callback=self.connect_to_radio_station, - callback_kwargs={"is_reconnect": True} - ) - ) + self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_CONNECT_PEER, + duration=conf.CONNECTION_RETRY_TIMER, + callback=self.connect_to_radio_station, + callback_kwargs={"is_reconnect": True}) if is_reconnect: return @@ 
-660,8 +654,11 @@ def show_peers(self): async def reset_leader(self, new_leader_id, block_height=0): logging.info(f"RESET LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})") leader_peer = self.peer_manager.get_peer(new_leader_id, None) - if block_height > 0 and block_height != self.block_manager.get_blockchain().last_block.height + 1: - logging.warning(f"height behind peer can not take leader role.") + + if block_height > 0 and block_height != self.block_manager.get_blockchain().last_block.header.height + 1: + util.logger.warning(f"height behind peer can not take leader role. block_height({block_height}), " + f"last_block.header.height(" + f"{self.block_manager.get_blockchain().last_block.header.height})") return if leader_peer is None: @@ -849,56 +846,26 @@ def stop_leader_complain_timer(self): self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN) def start_subscribe_timer(self): - timer_key = TimerService.TIMER_KEY_SUBSCRIBE - if timer_key not in self.__timer_service.timer_list: - self.__timer_service.add_timer( - timer_key, - Timer( - target=timer_key, - duration=conf.SUBSCRIBE_RETRY_TIMER, - is_repeat=True, - callback=self.subscribe_network - ) - ) + self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_SUBSCRIBE, + duration=conf.SUBSCRIBE_RETRY_TIMER, + is_repeat=True, callback=self.subscribe_network) def stop_subscribe_timer(self): - if TimerService.TIMER_KEY_SUBSCRIBE in self.__timer_service.timer_list: - self.__timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE) + self.__timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE) def start_check_last_block_rs_timer(self): - timer_key = TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION - if timer_key not in self.__timer_service.timer_list: - util.logger.spam(f"add timer for check_block_height_call to radiostation...") - self.__timer_service.add_timer( - timer_key, - Timer( - target=timer_key, - duration=conf.GET_LAST_BLOCK_TIMER, - is_repeat=True, - callback=self.__check_last_block_to_rs - ) - ) + self.__timer_service.add_timer_convenient( + timer_key=TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION, + duration=conf.GET_LAST_BLOCK_TIMER, is_repeat=True, callback=self.__check_last_block_to_rs) def stop_check_last_block_rs_timer(self): - timer_key = TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION - if timer_key in self.__timer_service.timer_list: - self.__timer_service.stop_timer(timer_key) + self.__timer_service.stop_timer(TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION) def start_shutdown_timer(self): - timer_key = TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE - if timer_key not in self.__timer_service.timer_list: - error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)" - self.__timer_service.add_timer( - timer_key, - Timer( - target=timer_key, - duration=conf.SHUTDOWN_TIMER, - callback=self.shutdown_peer, - callback_kwargs={"message": error} - ) - ) + error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)" + self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE, + duration=conf.SHUTDOWN_TIMER, callback=self.shutdown_peer, + callback_kwargs={"message": error}) def stop_shutdown_timer(self): - timer_key = TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE - if timer_key in self.__timer_service.timer_list: - self.__timer_service.stop_timer(timer_key) + 
self.__timer_service.stop_timer(TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE) diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index cc5df33a8..a3d448e84 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -443,4 +443,4 @@ def is_support_node_function(cls, node_function, node_type): CONF_PATH_ICONRPCSERVER_DEV = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/develop/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_TESTNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/testnet/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_MAINNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/mainnet/iconrpcserver_conf.json') -TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 30 +TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 2 # 30 # 30 is default for product diff --git a/loopchain/peer/consensus_siever.py b/loopchain/peer/consensus_siever.py index dd74e8126..d4c4c12a2 100644 --- a/loopchain/peer/consensus_siever.py +++ b/loopchain/peer/consensus_siever.py @@ -19,7 +19,7 @@ import loopchain.utils as util from loopchain import configure as conf from loopchain.baseservice import ObjectManager, TimerService, SlotTimer, Timer -from loopchain.blockchain import ExternalAddress, BlockBuilder, BlockVerifier, TransactionStatusInQueue, Hash32 +from loopchain.blockchain import ExternalAddress, BlockVerifier, Hash32 from loopchain.channel.channel_property import ChannelProperty from loopchain.peer.consensus_base import ConsensusBase From 4a576baf09a0434b28d5672367ae6df1bf5efddc Mon Sep 17 00:00:00 2001 From: winDy Date: Wed, 23 Jan 2019 21:42:37 +0900 Subject: [PATCH 16/23] Merge with develop --- loopchain/channel/channel_service.py | 2 +- loopchain/channel/channel_statemachine.py | 6 +++++- loopchain/configure_default.py | 5 +++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index a54e060b1..7688d53e3 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -691,7 +691,7 @@ async def reset_leader(self, new_leader_id, block_height=0): self.state_machine.turn_to_peer() # 새 leader 에게 subscribe 하기 - await self.subscribe_to_radio_station() + # await self.subscribe_to_radio_station() await self.subscribe_to_peer(peer_leader.peer_id, loopchain_pb2.BLOCK_GENERATOR) self.block_manager.set_peer_type(peer_type) diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index 4651fdf6f..2fb7866ae 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -22,6 +22,7 @@ from loopchain.peer import status_code from loopchain.protos import loopchain_pb2 from loopchain.statemachine import statemachine +from loopchain.utils import loggers @statemachine.StateMachine("Channel State Machine") @@ -142,13 +143,16 @@ def _do_vote(self): self.__channel_service.block_manager.vote_as_peer() def _vote_on_enter(self): - pass + loggers.get_preset().is_leader = False + loggers.get_preset().update_logger() def _vote_on_exit(self): # util.logger.notice(f"_vote_on_exit") pass def _blockgenerate_on_enter(self): + loggers.get_preset().is_leader = True + loggers.get_preset().update_logger() self.__channel_service.block_manager.start_block_generate_timer() def _blockgenerate_on_exit(self): diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index a3d448e84..682eeea6d 100644 --- a/loopchain/configure_default.py +++ 
b/loopchain/configure_default.py @@ -331,7 +331,7 @@ def is_support_node_function(cls, node_function, node_type): LOOPCHAIN_TEST_CHANNEL = "loopchain_test" CHANNEL_MANAGE_DATA_PATH = os.path.join(LOOPCHAIN_ROOT_PATH, 'channel_manage_data.json') # Channel Manage Data Path ENABLE_CHANNEL_AUTH = False # if this option is true, peer only gets channel infos to which it belongs. -ENABLE_REP_RADIO_STATION = True +ENABLE_REP_RADIO_STATION = False CHANNEL_RESTART_TIMEOUT = 120 CHANNEL_BUILTIN = True @@ -443,4 +443,5 @@ def is_support_node_function(cls, node_function, node_type): CONF_PATH_ICONRPCSERVER_DEV = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/develop/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_TESTNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/testnet/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_MAINNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/mainnet/iconrpcserver_conf.json') -TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 2 # 30 # 30 is default for product +# TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 2 # 2 is test value for developing leader complain +TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 30 # 30 is default for product From 6498897512e08da01a5c0d0cbe56cfa235053028 Mon Sep 17 00:00:00 2001 From: winDy Date: Thu, 24 Jan 2019 03:47:58 +0900 Subject: [PATCH 17/23] Add Epoch Class (It manages the information needed during consensus to store one block height. Candidate Blocks, Quorum, Votes and Leader Complaints.) --- loopchain/baseservice/peer_manager.py | 2 +- loopchain/blockchain/__init__.py | 3 +- loopchain/blockchain/blockchain.py | 5 +- loopchain/blockchain/epoch.py | 52 +++++++++ loopchain/channel/channel_inner_service.py | 34 +++--- loopchain/channel/channel_service.py | 84 +++++++------- loopchain/channel/channel_statemachine.py | 51 +++++---- loopchain/consensus/__init__.py | 1 - loopchain/consensus/epoch.py | 124 --------------------- loopchain/peer/block_manager.py | 92 ++++++++------- loopchain/peer/channel_manager.py | 9 +- loopchain/peer/consensus_siever.py | 3 + loopchain/peer/peer_outer_service.py | 55 +++------ loopchain/protos/loopchain.proto | 3 +- testcase/unittest/test_event_system.py | 4 + 15 files changed, 220 insertions(+), 302 deletions(-) create mode 100644 loopchain/blockchain/epoch.py delete mode 100644 loopchain/consensus/epoch.py diff --git a/loopchain/baseservice/peer_manager.py b/loopchain/baseservice/peer_manager.py index 05b427d7f..694f26150 100644 --- a/loopchain/baseservice/peer_manager.py +++ b/loopchain/baseservice/peer_manager.py @@ -361,7 +361,7 @@ def leader_complain_to_rs(self, group_id, is_announce_new_peer=True) -> PeerInfo return leader_peer def get_next_leader_peer(self, group_id=None, current_leader_peer_id=None, is_only_alive=False): - util.logger.spam(f"peer_manager:get_next_leader_peer") + util.logger.spam(f"peer_manager:get_next_leader_peer current_leader_peer_id({current_leader_peer_id})") if not current_leader_peer_id: leader_peer = self.get_leader_peer(group_id, is_complain_to_rs=True) diff --git a/loopchain/blockchain/__init__.py b/loopchain/blockchain/__init__.py index ee953c249..4ea365aa7 100644 --- a/loopchain/blockchain/__init__.py +++ b/loopchain/blockchain/__init__.py @@ -18,5 +18,6 @@ from .score_base import * from .transactions import * from .blocks import * -from .blockchain import * from .candidate_blocks import * +from .epoch import * +from .blockchain import * diff --git a/loopchain/blockchain/blockchain.py b/loopchain/blockchain/blockchain.py index 21d2a1041..16db74962 100644 
--- a/loopchain/blockchain/blockchain.py +++ b/loopchain/blockchain/blockchain.py @@ -23,7 +23,7 @@ from loopchain.baseservice import ScoreResponse, ObjectManager from loopchain.blockchain import (Block, BlockBuilder, BlockSerializer, BlockVersioner, Transaction, TransactionBuilder, TransactionSerializer, - Hash32, ExternalAddress, TransactionVersioner, Vote) + Hash32, ExternalAddress, TransactionVersioner, Vote, Epoch) from loopchain.blockchain.exception import * from loopchain.blockchain.score_base import * from loopchain.channel.channel_property import ChannelProperty @@ -264,6 +264,9 @@ def __add_block(self, block: Block, vote: Vote = None): # stop leader complain timer ObjectManager().channel_service.stop_leader_complain_timer() + # start new epoch + ObjectManager().channel_service.block_manager.epoch = Epoch.new_epoch(block.header.height + 1) + # notify new block ObjectManager().channel_service.inner_service.notify_new_block() diff --git a/loopchain/blockchain/epoch.py b/loopchain/blockchain/epoch.py new file mode 100644 index 000000000..7bcbf3627 --- /dev/null +++ b/loopchain/blockchain/epoch.py @@ -0,0 +1,52 @@ +# Copyright 2018 ICON Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""It manages the information needed during consensus to store one block height. +Candidate Blocks, Quorum, Votes and Leader Complaints. +""" +import loopchain.utils as util + +from loopchain.baseservice import ObjectManager +from loopchain.blockchain import Vote + + +class Epoch: + COMPLAIN_VOTE_HASH = "complain_vote_hash_for_reuse_Vote_class" + + def __init__(self, height: int, leader_id=None): + util.logger.notice(f"New Epoch Start height({height}) leader_id({leader_id})") + self.height = height + self.leader_id = leader_id + + # TODO using Epoch in BlockManager instead using candidate_blocks directly. + # But now! only collect leader complain votes. 
+ self.__candidate_blocks = None + self.__complain_vote = Vote(Epoch.COMPLAIN_VOTE_HASH, ObjectManager().channel_service.peer_manager) + + @staticmethod + def new_epoch(height: int, leader_id=None): + if leader_id is None and ObjectManager().channel_service.block_manager.epoch.leader_id: + leader_id = ObjectManager().channel_service.block_manager.epoch.leader_id + return Epoch(height, leader_id) + + def set_epoch_leader(self, leader_id): + util.logger.notice(f"Set Epoch leader height({self.height}) leader_id({leader_id})") + self.leader_id = leader_id + + def add_complain(self, complained_leader_id, new_leader_id, block_height): + util.logger.notice(f"add_complain complain_leader_id({complained_leader_id}), " + f"new_leader_id({new_leader_id}), " + f"block_height({block_height})") + + def complain_result(self): + pass diff --git a/loopchain/channel/channel_inner_service.py b/loopchain/channel/channel_inner_service.py index 0521f57d5..a31be9c2f 100644 --- a/loopchain/channel/channel_inner_service.py +++ b/loopchain/channel/channel_inner_service.py @@ -28,7 +28,6 @@ BlockSerializer, blocks, Hash32) from loopchain.blockchain.exception import * from loopchain.channel.channel_property import ChannelProperty -from loopchain.consensus import Epoch, VoteMessage from loopchain.peer.consensus_siever import ConsensusSiever from loopchain.protos import loopchain_pb2, message_code @@ -364,15 +363,6 @@ async def announce_confirmed_block(self, serialized_block, commit_state="{}"): response_code = message_code.Response.fail return response_code - @message_queue_task - def announce_new_block_for_vote(self, block: Block, epoch: Epoch): - acceptor = self._channel_service.acceptor - if acceptor.epoch is None: - pass - else: - acceptor.epoch.block_hash = block.header.hash.hex() - acceptor.create_vote(block=block, epoch=epoch) - @message_queue_task def block_sync(self, block_hash, block_height): blockchain = self._channel_service.block_manager.get_blockchain() @@ -434,7 +424,7 @@ def delete_peer(self, peer_id, group_id) -> None: @message_queue_task(type_=MessageQueueType.Worker) def vote_unconfirmed_block(self, peer_id, group_id, block_hash: Hash32, vote_code) -> None: block_manager = self._channel_service.block_manager - util.logger.spam(f"channel_inner_service:VoteUnconfirmedBlock " + util.logger.spam(f"channel_inner_service:vote_unconfirmed_block " f"({ChannelProperty().name}) block_hash({block_hash})") util.logger.debug("Peer vote to : " + block_hash.hex()[:8] + " " + str(vote_code) + f"from {peer_id[:8]}") @@ -450,13 +440,21 @@ def vote_unconfirmed_block(self, peer_id, group_id, block_hash: Hash32, vote_cod if isinstance(consensus, ConsensusSiever) and self._channel_service.state_machine.state == "BlockGenerate": consensus.count_votes(block_hash) - @message_queue_task - async def broadcast_vote(self, vote: VoteMessage): - acceptor = self._channel_service.acceptor - if acceptor.epoch is None: - pass - else: - await acceptor.apply_vote_into_block(vote) + @message_queue_task(type_=MessageQueueType.Worker) + def complain_leader(self, complained_leader_id, new_leader_id, block_height) -> None: + block_manager = self._channel_service.block_manager + util.logger.notice(f"channel_inner_service:complain_leader " + f"complain_leader_id({complained_leader_id}), " + f"new_leader_id({new_leader_id}), " + f"block_height({block_height})") + + block_manager.epoch.add_complain( + complained_leader_id, new_leader_id, block_height + ) + + # consensus = block_manager.consensus_algorithm + # if isinstance(consensus, 
ConsensusSiever) and self._channel_service.state_machine.state == "BlockGenerate": + # consensus.count_votes(block_hash) @message_queue_task def get_invoke_result(self, tx_hash): diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 7688d53e3..7babc041d 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -25,15 +25,14 @@ import loopchain.utils as util from loopchain import configure as conf from loopchain.baseservice import BroadcastScheduler, BroadcastCommand, ObjectManager, CommonSubprocess +from loopchain.baseservice import RestStubManager, NodeSubscriber from loopchain.baseservice import StubManager, PeerManager, PeerStatus, TimerService -from loopchain.baseservice import RestStubManager, Timer, NodeSubscriber from loopchain.blockchain import Block, BlockBuilder, TransactionSerializer from loopchain.channel.channel_inner_service import ChannelInnerService from loopchain.channel.channel_property import ChannelProperty from loopchain.channel.channel_statemachine import ChannelStateMachine -from loopchain.consensus import Consensus, Acceptor, Proposer -from loopchain.peer import BlockManager from loopchain.crypto.signature import Signer +from loopchain.peer import BlockManager from loopchain.protos import loopchain_pb2_grpc, message_code, loopchain_pb2 from loopchain.utils import loggers, command_arguments from loopchain.utils.icon_service import convert_params, ParamType, response_to_json_query @@ -49,9 +48,9 @@ def __init__(self, channel_name, amqp_target, amqp_key): self.__peer_manager: PeerManager = None self.__broadcast_scheduler: BroadcastScheduler = None self.__radio_station_stub = None - self.__consensus: Consensus = None - self.__proposer: Proposer = None - self.__acceptor: Acceptor = None + self.__consensus = None + # self.__proposer: Proposer = None + # self.__acceptor: Acceptor = None self.__timer_service = TimerService() self.__node_subscriber: NodeSubscriber = None @@ -205,14 +204,14 @@ async def init(self, peer_port, peer_target, rest_target, radio_station_target, await self.__init_score_container() await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS, conf.AMQP_RETRY_DELAY, exclusive=True) - if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft: - util.logger.spam(f"init consensus !") - # load consensus - self.__init_consensus() - # load proposer - self.__init_proposer(peer_id=peer_id) - # load acceptor - self.__init_acceptor(peer_id=peer_id) + # if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft: + # util.logger.spam(f"init consensus !") + # # load consensus + # self.__init_consensus() + # # load proposer + # self.__init_proposer(peer_id=peer_id) + # # load acceptor + # self.__init_acceptor(peer_id=peer_id) if self.is_support_node_function(conf.NodeFunction.Vote): if conf.ENABLE_REP_RADIO_STATION: @@ -275,32 +274,32 @@ def __init_block_manager(self): except leveldb.LevelDBError as e: util.exit_and_msg("LevelDBError(" + str(e) + ")") - def __init_consensus(self): - consensus = Consensus(self, ChannelProperty().name) - self.__consensus = consensus - self.__block_manager.consensus = consensus - consensus.register_subscriber(self.__block_manager) - - def __init_proposer(self, peer_id: str): - proposer = Proposer( - name="loopchain.consensus.Proposer", - peer_id=peer_id, - channel=ChannelProperty().name, - channel_service=self - ) - self.__consensus.register_subscriber(proposer) - self.__proposer = proposer - - def __init_acceptor(self, peer_id: str): - acceptor = 
Acceptor( - name="loopchain.consensus.Acceptor", - consensus=self.__consensus, - peer_id=peer_id, - channel=ChannelProperty().name, - channel_service=self - ) - self.__consensus.register_subscriber(acceptor) - self.__acceptor = acceptor + # def __init_consensus(self): + # consensus = Consensus(self, ChannelProperty().name) + # self.__consensus = consensus + # self.__block_manager.consensus = consensus + # consensus.register_subscriber(self.__block_manager) + # + # def __init_proposer(self, peer_id: str): + # proposer = Proposer( + # name="loopchain.consensus.Proposer", + # peer_id=peer_id, + # channel=ChannelProperty().name, + # channel_service=self + # ) + # self.__consensus.register_subscriber(proposer) + # self.__proposer = proposer + # + # def __init_acceptor(self, peer_id: str): + # acceptor = Acceptor( + # name="loopchain.consensus.Acceptor", + # consensus=self.__consensus, + # peer_id=peer_id, + # channel=ChannelProperty().name, + # channel_service=self + # ) + # self.__consensus.register_subscriber(acceptor) + # self.__acceptor = acceptor def __init_broadcast_scheduler(self): scheduler = BroadcastScheduler(channel=ChannelProperty().name, self_target=ChannelProperty().peer_target) @@ -695,6 +694,7 @@ async def reset_leader(self, new_leader_id, block_height=0): await self.subscribe_to_peer(peer_leader.peer_id, loopchain_pb2.BLOCK_GENERATOR) self.block_manager.set_peer_type(peer_type) + self.block_manager.epoch.set_epoch_leader(peer_leader.peer_id) def set_new_leader(self, new_leader_id, block_height=0): logging.info(f"SET NEW LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})") @@ -836,13 +836,13 @@ def get_object_has_queue_by_consensus(self): return object_has_queue def start_leader_complain_timer(self): - util.logger.notice(f"start_leader_complain_timer in channel service.") + # util.logger.debug(f"start_leader_complain_timer in channel service.") self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_LEADER_COMPLAIN, duration=conf.TIMEOUT_FOR_LEADER_COMPLAIN, is_repeat=True, callback=self.state_machine.leader_complain) def stop_leader_complain_timer(self): - util.logger.notice(f"stop_leader_complain_timer in channel service.") + # util.logger.debug(f"stop_leader_complain_timer in channel service.") self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN) def start_subscribe_timer(self): diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index 2fb7866ae..de9b849b7 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -28,15 +28,18 @@ @statemachine.StateMachine("Channel State Machine") class ChannelStateMachine(object): states = ['InitComponents', - State(name='Consensus', ignore_invalid_triggers=True, on_enter='_consensus_on_enter'), - State(name='BlockHeightSync', ignore_invalid_triggers=True, on_enter='_blockheightsync_on_enter'), + State(name='Consensus', ignore_invalid_triggers=True, + on_enter='_consensus_on_enter'), + State(name='BlockHeightSync', ignore_invalid_triggers=True, + on_enter='_blockheightsync_on_enter'), 'EvaluateNetwork', State(name='BlockSync', ignore_invalid_triggers=True, on_enter='_blocksync_on_enter', on_exit='_blocksync_on_exit'), - State(name='SubscribeNetwork', ignore_invalid_triggers=True, on_enter='_subscribe_network_on_enter', - on_exit='_subscribe_network_on_exit'), + State(name='SubscribeNetwork', ignore_invalid_triggers=True, + on_enter='_subscribe_network_on_enter', 
on_exit='_subscribe_network_on_exit'), 'Watch', - State(name='Vote', ignore_invalid_triggers=True, on_enter='_vote_on_enter', on_exit='_vote_on_exit'), + State(name='Vote', ignore_invalid_triggers=True, + on_enter='_vote_on_enter', on_exit='_vote_on_exit'), State(name='BlockGenerate', ignore_invalid_triggers=True, on_enter='_blockgenerate_on_enter', on_exit='_blockgenerate_on_exit'), State(name='LeaderComplain', ignore_invalid_triggers=True, @@ -82,6 +85,7 @@ def subscribe_network(self): def vote(self): pass + # transition defined in __init__ for multiple conditions. def complete_sync(self): pass @@ -103,12 +107,6 @@ def _is_leader(self): def _has_no_vote_function(self): return not self.__channel_service.is_support_node_function(conf.NodeFunction.Vote) - def _consensus_on_enter(self): - self.block_height_sync() - - def _blockheightsync_on_enter(self): - self.evaluate_network() - def _enter_block_sync(self): self.block_sync() @@ -116,13 +114,6 @@ def _do_block_sync(self): loop = MessageQueueService.loop asyncio.run_coroutine_threadsafe(self.__channel_service.block_height_sync_channel(), loop) - def _blocksync_on_enter(self): - self.__channel_service.block_manager.update_service_status(status_code.Service.block_height_sync) - - def _blocksync_on_exit(self): - self.__channel_service.block_manager.stop_block_height_sync_timer() - self.__channel_service.block_manager.update_service_status(status_code.Service.online) - def _do_evaluate_network(self): loop = MessageQueueService.loop asyncio.run_coroutine_threadsafe(self.__channel_service.evaluate_network(), loop) @@ -131,6 +122,24 @@ def _do_subscribe_network(self): loop = MessageQueueService.loop asyncio.run_coroutine_threadsafe(self.__channel_service.subscribe_network(), loop) + def _do_vote(self): + self.__channel_service.block_manager.vote_as_peer() + + # State handlers { + + def _consensus_on_enter(self): + self.block_height_sync() + + def _blockheightsync_on_enter(self): + self.evaluate_network() + + def _blocksync_on_enter(self): + self.__channel_service.block_manager.update_service_status(status_code.Service.block_height_sync) + + def _blocksync_on_exit(self): + self.__channel_service.block_manager.stop_block_height_sync_timer() + self.__channel_service.block_manager.update_service_status(status_code.Service.online) + def _subscribe_network_on_enter(self): self.__channel_service.start_subscribe_timer() self.__channel_service.start_shutdown_timer() @@ -139,9 +148,6 @@ def _subscribe_network_on_exit(self): self.__channel_service.stop_subscribe_timer() self.__channel_service.stop_shutdown_timer() - def _do_vote(self): - self.__channel_service.block_manager.vote_as_peer() - def _vote_on_enter(self): loggers.get_preset().is_leader = False loggers.get_preset().update_logger() @@ -160,6 +166,9 @@ def _blockgenerate_on_exit(self): def _leadercomplain_on_enter(self): util.logger.notice(f"_leadercomplain_on_enter") + self.__channel_service.block_manager.leader_complain() def _leadercomplain_on_exit(self): util.logger.notice(f"_leadercomplain_on_exit") + + # } diff --git a/loopchain/consensus/__init__.py b/loopchain/consensus/__init__.py index 157790dfe..1d0d98025 100644 --- a/loopchain/consensus/__init__.py +++ b/loopchain/consensus/__init__.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""Package for consensus""" -from .epoch import * from .subscriber import * from .publisher import * from .consensus import * diff --git a/loopchain/consensus/epoch.py b/loopchain/consensus/epoch.py deleted file mode 100644 index b1cec5fa6..000000000 --- a/loopchain/consensus/epoch.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2018 ICON Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""An object for a phase of block """ - -import logging -from enum import Enum - -from loopchain.blockchain import Block - - -class EpochStatus(Enum): - unknown = 0 - success = 1 - leader_complain = 2 - - -class Epoch: - def __init__(self, **kwargs): - self.__prev_epoch: 'Epoch' = kwargs.get("prev_epoch", None) - self.__precommit_block: Block = kwargs.get("precommit_block", None) - self.__block_height: int = 1 if self.__precommit_block is None else self.__precommit_block.height + 1 - self.__block_hash: str = kwargs.get("block_hash", None) - self.__quorum: int = kwargs.get("quorum", None) - self.__complain_quorum: int = kwargs.get("complain_quorum", None) - self.__leader_id = kwargs.get("leader_id", None) - self.__fixed_vote_list: dict = {} - self.__agree_vote_list: dict = {} - self.__complain_vote_list: dict = {} - self.__ready_vote_list: dict = {} - self.__status = EpochStatus.unknown - - @property - def prev_epoch(self): - return self.__prev_epoch - - @property - def block_height(self): - return self.__block_height - - @property - def precommit_block(self): - return self.__precommit_block - - @property - def quorum(self): - return self.__quorum - - @property - def complain_quorum(self): - return self.__complain_quorum - - @property - def status(self): - return self.__status - - @status.setter - def status(self, status: EpochStatus): - self.__status = status - - @property - def block_hash(self): - return self.__block_hash - - @block_hash.setter - def block_hash(self, block_hash): - self.__block_hash = block_hash - - @property - def leader_id(self): - return self.__leader_id - - @property - def fixed_vote_list(self): - return self.__fixed_vote_list - - @fixed_vote_list.setter - def fixed_vote_list(self, vote_list: dict): - self.__fixed_vote_list = vote_list - - @property - def agree_vote_list(self): - return self.__agree_vote_list - - @agree_vote_list.setter - def agree_vote_list(self, vote_list: dict): - self.__agree_vote_list = vote_list - - @property - def complain_vote_list(self): - return self.__complain_vote_list - - @complain_vote_list.setter - def complain_vote_list(self, vote_list: dict): - self.__complain_vote_list = vote_list - - @property - def ready_vote_list(self): - return self.__ready_vote_list - - @ready_vote_list.setter - def ready_vote_list(self, vote_list: dict): - self.__ready_vote_list = vote_list - - def set_quorum(self, quorum: int, complain_quorum: int): - logging.debug(f"SET QUORUM : quorum({quorum}), complain_quorum({complain_quorum})") - self.__quorum = quorum - self.__complain_quorum = complain_quorum - - def set_leader(self): - pass - - def change_state(self): - pass 
diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index da96d3110..27ecdaf02 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -12,26 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License. """A management class for blockchain.""" +import json +import logging +import pickle import queue import shutil +import threading import traceback +from collections import namedtuple from concurrent.futures import ThreadPoolExecutor, Future +from typing import TYPE_CHECKING from jsonrpcclient.exceptions import ReceivedErrorResponse -from loopchain.baseservice import TimerService -from loopchain.consensus import * +import loopchain.utils as util +from loopchain import configure as conf +from loopchain.baseservice import TimerService, BlockGenerationScheduler, ObjectManager, Timer +from loopchain.baseservice.aging_cache import AgingCache +from loopchain.blockchain import TransactionStatusInQueue, BlockChain, CandidateBlocks, Block, Epoch, Transaction, \ + TransactionInvalidDuplicatedHash, TransactionInvalidOutOfTimeBound, BlockchainError, Vote, NID, BlockSerializer, \ + exception, BlockVerifier +from loopchain.channel.channel_property import ChannelProperty from loopchain.peer import status_code from loopchain.peer.consensus_siever import ConsensusSiever -from loopchain.protos import loopchain_pb2_grpc +from loopchain.protos import loopchain_pb2_grpc, message_code from loopchain.tools.grpc_helper import GRPCHelper from loopchain.utils.message_queue import StubCollection +if TYPE_CHECKING: + from loopchain.channel.channel_service import ChannelService + # Changing the import location will cause a pickle error. import loopchain_pb2 -class BlockManager(Subscriber): +class BlockManager: """Manage the blockchain of a channel. It has objects for consensus and db object. 
""" @@ -39,8 +54,6 @@ class BlockManager(Subscriber): TESTNET = "885b8021826f7e741be7f53bb95b48221e9ab263f377e997b2e47a7b8f4a2a8b" def __init__(self, name: str, channel_manager, peer_id, channel_name, level_db_identity): - super().__init__(name) - self.__channel_service: ChannelService = channel_manager self.__channel_name = channel_name self.__pre_validate_strategy = self.__pre_validate @@ -64,14 +77,13 @@ def __init__(self, name: str, channel_manager, peer_id, channel_name, level_db_i self.__block_height_future: Future = None self.__subscribe_target_peer_stub = None self.__block_generation_scheduler = BlockGenerationScheduler(self.__channel_name) - self.__prev_epoch: Epoch = None self.__precommit_block: Block = None - self.__epoch: Epoch = None - self.event_list = [(Consensus.EVENT_COMPLETE_CONSENSUS, self.callback_complete_consensus, 0)] self.set_peer_type(loopchain_pb2.PEER) self.name = name self.__service_status = status_code.Service.online + self.epoch: Epoch = Epoch(self.__blockchain.last_block.header.height + 1 if self.__blockchain.last_block else 1) + @property def channel_name(self): return self.__channel_name @@ -106,7 +118,7 @@ def consensus(self): return self.__consensus @consensus.setter - def consensus(self, consensus: Consensus): + def consensus(self, consensus): self.__consensus = consensus @property @@ -457,7 +469,7 @@ def __block_height_sync(self, target_peer_stub=None, target_height=None): # The adjustment of block height and the process for data synchronization of peer # === Love&Hate Algorithm === # - util.logger.info("try block height sync...with love&hate") + util.logger.debug("try block height sync...with love&hate") # Make Peer Stub List [peer_stub, ...] and get max_height of network # max_height: current max height @@ -547,7 +559,8 @@ def __block_height_sync(self, target_peer_stub=None, target_height=None): return False if my_height >= max_height: - util.logger.info(f"block_manager:block_height_sync is complete.") + util.logger.debug(f"block_manager:block_height_sync is complete.") + self.epoch.set_epoch_leader(self.__channel_service.peer_manager.get_leader_id(conf.ALL_GROUP_ID)) self.__channel_service.state_machine.subscribe_network() else: logging.warning(f"it's not completed block height synchronization in once ...\n" @@ -654,6 +667,26 @@ def stop(self): if self.consensus_algorithm: self.consensus_algorithm.stop() + def leader_complain(self): + complained_leader_id = self.epoch.leader_id + new_leader_id = self.__channel_service.peer_manager.get_next_leader_peer( + current_leader_peer_id=self.epoch.leader_id + ) + + if not isinstance(new_leader_id, str): + new_leader_id = "" + + if not isinstance(complained_leader_id, str): + complained_leader_id = "" + + request = loopchain_pb2.ComplainLeaderRequest( + complained_leader_id=complained_leader_id, + channel=self.channel_name, + new_leader_id=new_leader_id, + block_height=self.epoch.height, + message="I'm your father.") + self.__channel_service.broadcast_scheduler.schedule_broadcast("ComplainLeader", request) + def vote_unconfirmed_block(self, block_hash, is_validated): logging.debug(f"block_manager:vote_unconfirmed_block ({self.channel_name}/{is_validated})") @@ -717,38 +750,3 @@ def vote_as_peer(self): self.candidate_blocks.add_block(unconfirmed_block) finally: self.vote_unconfirmed_block(unconfirmed_block.header.hash, exception is None) - - def callback_complete_consensus(self, **kwargs): - self.__prev_epoch = kwargs.get("prev_epoch", None) - self.__epoch = kwargs.get("epoch", None) - last_block = 
self.get_blockchain().last_block - last_block_height = last_block.height - - if last_block_height > 0 and self.__precommit_block is None: - logging.error("It's weird what a precommit block is None. " - "That's why a timer can't be added to timer service.") - - if self.__prev_epoch: - if self.__prev_epoch.status == EpochStatus.success: - util.logger.spam(f"BlockManager:callback_complete_consensus::epoch status is success !! " - f"self.__precommit_block({self.__precommit_block})") - - if self.__precommit_block: - if not self.add_block(self.__precommit_block): - self.__precommit_block = self.__blockchain.get_precommit_block() - - self.__precommit_block = kwargs.get("precommit_block", None) - if self.__channel_service.score_write_precommit_state(self.__precommit_block) and \ - self.__blockchain.put_precommit_block(self.__precommit_block): - util.logger.spam(f"start timer :: success precommit block info - {self.__precommit_block.height}") - - elif self.__prev_epoch.status == EpochStatus.leader_complain: - self.__epoch.fixed_vote_list = self.__prev_epoch.ready_vote_list - self.__precommit_block = self.__consensus.precommit_block - self.__prev_epoch = self.__prev_epoch.prev_epoch - util.logger.spam(f"start timer :: fail precommit block info - {self.__precommit_block.height}") - - self.__channel_service.consensus.start_timer(self.__channel_service.acceptor.callback_leader_complain) - else: - util.logger.spam(f"start timer :: after genesis or rebuild block / " - f"precommit block info - {last_block_height}") diff --git a/loopchain/peer/channel_manager.py b/loopchain/peer/channel_manager.py index 72f6eafe2..ae0a940d9 100644 --- a/loopchain/peer/channel_manager.py +++ b/loopchain/peer/channel_manager.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
""" A class for Manage Channels """ +import logging -from loopchain.baseservice import BroadcastScheduler, BroadcastCommand, PeerManager +from loopchain import configure as conf +from loopchain.baseservice import BroadcastScheduler, BroadcastCommand, PeerManager, ObjectManager from loopchain.container import CommonService -from loopchain.consensus import * class ChannelManager: @@ -94,10 +95,10 @@ def broadcast(self, channel, method_name, method_param, response_handler=None, * if channel in self.__broadcast_schedulers.keys(): kwargs = {} - if retry_times is not None: + if retry_times: kwargs['retry_times'] = retry_times - if time is not None: + if timeout: kwargs['timeout'] = timeout self.__broadcast_schedulers[channel].schedule_job( diff --git a/loopchain/peer/consensus_siever.py b/loopchain/peer/consensus_siever.py index d4c4c12a2..f839e4ad3 100644 --- a/loopchain/peer/consensus_siever.py +++ b/loopchain/peer/consensus_siever.py @@ -106,12 +106,15 @@ async def consensus(self): self._blockchain.last_unconfirmed_block = candidate_block broadcast_func = partial(self._blockmanager.broadcast_send_unconfirmed_block, candidate_block) + + # TODO Temporary ignore below line for developing leader complain self.__start_broadcast_send_unconfirmed_block_timer(broadcast_func) if len(block_builder.transactions) == 0 and not conf.ALLOW_MAKE_EMPTY_BLOCK and \ next_leader.hex() != ChannelProperty().peer_id: # util.logger.debug(f"-------------------turn_to_peer") ObjectManager().channel_service.state_machine.turn_to_peer() + self._blockmanager.epoch.set_epoch_leader(next_leader.hex_hx()) else: self.__block_generation_timer.call() diff --git a/loopchain/peer/peer_outer_service.py b/loopchain/peer/peer_outer_service.py index cc1183578..6c0db84f2 100644 --- a/loopchain/peer/peer_outer_service.py +++ b/loopchain/peer/peer_outer_service.py @@ -19,9 +19,8 @@ from loopchain.baseservice import ObjectManager, Monitor, TimerService from loopchain.blockchain import * -from loopchain.consensus.vote_message import * from loopchain.peer import status_code -from loopchain.protos import loopchain_pb2_grpc, message_code +from loopchain.protos import loopchain_pb2_grpc, message_code, ComplainLeaderRequest from loopchain.utils.message_queue import StubCollection # Changing the import location will cause a pickle error. @@ -339,6 +338,19 @@ def Echo(self, request, context): return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message=request.request) + def ComplainLeader(self, request: ComplainLeaderRequest, context): + channel = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel + util.logger.notice(f"ComplainLeader " + f"height({request.block_height}) complained_peer({request.complained_leader_id})") + + channel_stub = StubCollection().channel_stubs[channel] + channel_stub.sync_task().complain_leader( + complained_leader_id=request.complained_leader_id, + new_leader_id=request.new_leader_id, + block_height=request.block_height) + + return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") + def CreateTx(self, request, context): """make tx by client request and broadcast it to the network @@ -518,32 +530,6 @@ def AnnounceUnconfirmedBlock(self, request, context): channel_stub.sync_task().announce_unconfirmed_block(request.block) return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") - def AnnounceNewBlockForVote(self, request, context): - """수집된 tx 로 생성한 Block 을 각 peer 에 전송하여 검증을 요청한다. 
- - :param request: - :param context: - :return: - """ - channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel - logging.debug(f"peer_outer_service::AnnounceNewBlockForVote channel({channel_name})") - - block: Block = pickle.loads(request.block) - epoch = pickle.loads(request.epoch) - - logging.debug(f"#block \n" - f"epoch({epoch.block_height})\n" - f"prev_epoch({epoch.prev_epoch})\n" - f"block_type({block.block_type})\n" - f"block_hash({block.block_hash})\n" - f"peer_id({block.peer_id})\n" - f"block_type({block.block_type})\n") - - channel_stub = StubCollection().channel_stubs[channel_name] - channel_stub.sync_task().announce_new_block_for_vote(block, epoch) - - return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") - def BlockSync(self, request, context): # Peer To Peer channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel @@ -663,19 +649,6 @@ def VoteUnconfirmedBlock(self, request, context): return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") - def BroadcastVote(self, request, context): - channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel - vote: VoteMessage = VoteMessage().loads(request.vote_data) - - logging.debug(f"peer_outer_service.py:BroadcastVote :: channel({channel_name})") - logging.info(f"Peer vote to : {vote.block_hash} / {request.vote_code} from {request.peer_id}") - util.logger.spam(f"peer_outer_service.py:BroadcastVote::{vote.print_vote_message()}") - - channel_stub = StubCollection().channel_stubs[request.channel] - channel_stub.sync_task().broadcast_vote(vote) - - return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") - def AnnounceNewLeader(self, request, context): if not request.channel: raise Exception("peer_outer_service:AnnounceNewLeader : Channel is not defined.") diff --git a/loopchain/protos/loopchain.proto b/loopchain/protos/loopchain.proto index 3eaca8b59..59c8940d0 100644 --- a/loopchain/protos/loopchain.proto +++ b/loopchain/protos/loopchain.proto @@ -132,7 +132,8 @@ message ComplainLeaderRequest { required string complained_leader_id = 1; optional string channel = 2; required string new_leader_id = 3; - required string message = 4; + required int32 block_height = 4; + required string message = 5; } diff --git a/testcase/unittest/test_event_system.py b/testcase/unittest/test_event_system.py index 37e239e08..8bc0bcb70 100644 --- a/testcase/unittest/test_event_system.py +++ b/testcase/unittest/test_event_system.py @@ -15,7 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Test Score Invoke and Query""" +"""Temporary Skip("LFT") +""" +""" import logging import unittest @@ -125,3 +128,4 @@ def __init__(self, event_list: list): def _notify(self, event_name: str): kwargs = {"name": event_name} super()._notify(event_name=event_name, **kwargs) +""" From 1e85f03286c24e6ce5254eb5e0c3f7b1526a2380 Mon Sep 17 00:00:00 2001 From: winDy Date: Mon, 28 Jan 2019 03:43:52 +0900 Subject: [PATCH 18/23] Complain leader --- loopchain/baseservice/peer_manager.py | 7 +++++-- loopchain/blockchain/epoch.py | 18 ++++++++++++---- loopchain/blockchain/vote.py | 18 +++++++++------- loopchain/channel/channel_inner_service.py | 20 ++++++++++++------ loopchain/channel/channel_service.py | 2 ++ loopchain/channel/channel_statemachine.py | 4 ++-- loopchain/configure_default.py | 7 ++++--- loopchain/peer/block_manager.py | 24 +++++++++++++++++++--- loopchain/peer/consensus_siever.py | 3 ++- loopchain/peer/peer_outer_service.py | 5 ++++- loopchain/protos/loopchain.proto | 2 ++ 11 files changed, 81 insertions(+), 29 deletions(-) diff --git a/loopchain/baseservice/peer_manager.py b/loopchain/baseservice/peer_manager.py index 694f26150..b5a7a7a53 100644 --- a/loopchain/baseservice/peer_manager.py +++ b/loopchain/baseservice/peer_manager.py @@ -14,14 +14,15 @@ """A module for managing peer list""" import json import logging +import math import pickle import threading -import math from typing import Union import loopchain.utils as util from loopchain import configure as conf from loopchain.baseservice import BroadcastCommand, ObjectManager, StubManager, PeerStatus, PeerObject, PeerInfo +from loopchain.channel.channel_property import ChannelProperty from loopchain.protos import loopchain_pb2_grpc, message_code # Changing the import location will cause a pickle error. @@ -501,7 +502,9 @@ def announce_new_leader(self, complained_leader_id, new_leader_id, is_broadcast= complained_leader_id=complained_leader_id, channel=self.__channel_name, new_leader_id=new_leader_id, - message="Announce New Leader" + message="Announce New Leader", + peer_id=ChannelProperty().peer_id, + group_id=ChannelProperty().group_id ) # new_leader_peer = self.get_peer(new_leader_id) diff --git a/loopchain/blockchain/epoch.py b/loopchain/blockchain/epoch.py index 7bcbf3627..6f9df021a 100644 --- a/loopchain/blockchain/epoch.py +++ b/loopchain/blockchain/epoch.py @@ -18,6 +18,7 @@ from loopchain.baseservice import ObjectManager from loopchain.blockchain import Vote +from loopchain import configure as conf class Epoch: @@ -43,10 +44,19 @@ def set_epoch_leader(self, leader_id): util.logger.notice(f"Set Epoch leader height({self.height}) leader_id({leader_id})") self.leader_id = leader_id - def add_complain(self, complained_leader_id, new_leader_id, block_height): + def add_complain(self, complained_leader_id, new_leader_id, block_height, peer_id, group_id): util.logger.notice(f"add_complain complain_leader_id({complained_leader_id}), " f"new_leader_id({new_leader_id}), " - f"block_height({block_height})") + f"block_height({block_height}), " + f"peer_id({peer_id})") + self.__complain_vote.add_vote(group_id, peer_id, new_leader_id) - def complain_result(self): - pass + def complain_result(self) -> str or None: + """return new leader id when complete complain leader. 
+ + :return: new leader id or None + """ + vote_result = self.__complain_vote.get_result(Epoch.COMPLAIN_VOTE_HASH, conf.LEADER_COMPLAIN_RATIO) + util.logger.notice(f"complain_result vote_result({vote_result})") + + return vote_result diff --git a/loopchain/blockchain/vote.py b/loopchain/blockchain/vote.py index c7702e2dc..7966f1cb2 100644 --- a/loopchain/blockchain/vote.py +++ b/loopchain/blockchain/vote.py @@ -121,7 +121,7 @@ def get_result_detail(self, block_hash, voting_ratio): agree_vote_group_count = 0 total_vote_group_count = 0 agree_vote_peer_count = 0 - result = False + result = None for group_id in list(self.__votes.keys()): # don't treat with null group @@ -133,10 +133,14 @@ def get_result_detail(self, block_hash, voting_ratio): vote_peer_count_in_group = 0 for peer_id in list(self.__votes[group_id].keys()): total_peer_count_in_group += 1 - if len(self.__votes[group_id][peer_id]) > 0 and self.__votes[group_id][peer_id][0] is True: - agree_peer_count_in_group += 1 - agree_vote_peer_count += 1 - if len(self.__votes[group_id][peer_id]) > 0 and self.__votes[group_id][peer_id][0] is False: + if len(self.__votes[group_id][peer_id]) > 0 and self.__votes[group_id][peer_id][0]: + if result and result != self.__votes[group_id][peer_id][0]: + result = False + else: + agree_peer_count_in_group += 1 + agree_vote_peer_count += 1 + result = self.__votes[group_id][peer_id][0] + if len(self.__votes[group_id][peer_id]) > 0 and not self.__votes[group_id][peer_id][0]: vote_peer_count_in_group += 1 if agree_peer_count_in_group > total_peer_count_in_group * voting_ratio: @@ -146,8 +150,8 @@ def get_result_detail(self, block_hash, voting_ratio): >= total_peer_count_in_group * (1 - voting_ratio): total_vote_group_count += 1 - if agree_vote_group_count > total_group_count * voting_ratio: - result = True + if agree_vote_group_count < total_group_count * voting_ratio: + result = False logging.debug("==result: " + str(result)) logging.debug("=agree_vote_group_count: " + str(agree_vote_group_count)) diff --git a/loopchain/channel/channel_inner_service.py b/loopchain/channel/channel_inner_service.py index a31be9c2f..6c57a6d51 100644 --- a/loopchain/channel/channel_inner_service.py +++ b/loopchain/channel/channel_inner_service.py @@ -437,11 +437,11 @@ def vote_unconfirmed_block(self, peer_id, group_id, block_hash: Hash32, vote_cod ) consensus = block_manager.consensus_algorithm - if isinstance(consensus, ConsensusSiever) and self._channel_service.state_machine.state == "BlockGenerate": + if self._channel_service.state_machine.state == "BlockGenerate": consensus.count_votes(block_hash) @message_queue_task(type_=MessageQueueType.Worker) - def complain_leader(self, complained_leader_id, new_leader_id, block_height) -> None: + def complain_leader(self, complained_leader_id, new_leader_id, block_height, peer_id, group_id) -> None: block_manager = self._channel_service.block_manager util.logger.notice(f"channel_inner_service:complain_leader " f"complain_leader_id({complained_leader_id}), " @@ -449,12 +449,20 @@ def complain_leader(self, complained_leader_id, new_leader_id, block_height) -> f"block_height({block_height})") block_manager.epoch.add_complain( - complained_leader_id, new_leader_id, block_height + complained_leader_id, new_leader_id, block_height, peer_id, group_id ) - # consensus = block_manager.consensus_algorithm - # if isinstance(consensus, ConsensusSiever) and self._channel_service.state_machine.state == "BlockGenerate": - # consensus.count_votes(block_hash) + next_new_leader = 
block_manager.epoch.complain_result() + if next_new_leader: + self._channel_service.peer_manager.remove_peer(complained_leader_id) + self._channel_service.stop_leader_complain_timer() + if next_new_leader == ChannelProperty().peer_id: + # Turn to Leader and Send Leader Complain Block + util.logger.notice(f"No I'm your father....") + self._channel_service.state_machine.turn_to_leader() + else: + util.logger.notice(f"I'm your Jedi.") + self._channel_service.state_machine.turn_to_peer() @message_queue_task def get_invoke_result(self, tx_hash): diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 7babc041d..8cfb5c604 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -221,6 +221,8 @@ async def init(self, peer_port, peer_target, rest_target, radio_station_target, else: self.__init_node_subscriber() + self.block_manager.init_epoch() + async def evaluate_network(self): await self.set_peer_type_in_channel() if self.block_manager.peer_type == loopchain_pb2.BLOCK_GENERATOR: diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index de9b849b7..df20489e7 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -89,11 +89,11 @@ def vote(self): def complete_sync(self): pass - @statemachine.transition(source=('BlockGenerate', 'Vote'), dest='Vote') + @statemachine.transition(source=('BlockGenerate', 'Vote', 'LeaderComplain'), dest='Vote') def turn_to_peer(self): pass - @statemachine.transition(source=('Vote', 'BlockGenerate'), dest='BlockGenerate') + @statemachine.transition(source=('Vote', 'BlockGenerate', 'LeaderComplain'), dest='BlockGenerate') def turn_to_leader(self): pass diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index 682eeea6d..1ac8988fb 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -167,8 +167,9 @@ class ConsensusAlgorithm(IntEnum): MAX_TX_COUNT_IN_ADDTX_LIST = 10 # AddTxList can send multiple tx in one message. SEND_TX_LIST_DURATION = 0.3 # seconds USE_ZIPPED_DUMPS = True # Rolling update does not work if this option is different from the running node. 
-# 블럭이 합의 되는 투표율 1 = 100%, 0.5 = 50% -VOTING_RATIO = 0.66 +# Consensus Vote Ratio 1 = 100%, 0.5 = 50% +VOTING_RATIO = 0.67 # for Add Block +LEADER_COMPLAIN_RATIO = 0.51 # for Leader Complain # Block Height 를 level_db 의 key(bytes)로 변환할때 bytes size BLOCK_HEIGHT_BYTES_LEN = 12 # Block vote timeout @@ -443,5 +444,5 @@ def is_support_node_function(cls, node_function, node_type): CONF_PATH_ICONRPCSERVER_DEV = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/develop/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_TESTNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/testnet/iconrpcserver_conf.json') CONF_PATH_ICONRPCSERVER_MAINNET = os.path.join(LOOPCHAIN_ROOT_PATH, 'conf/mainnet/iconrpcserver_conf.json') -# TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 2 # 2 is test value for developing leader complain +# TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 3 # 3 is test value for developing leader complain TIMEOUT_FOR_LEADER_COMPLAIN = INTERVAL_BLOCKGENERATION * 30 # 30 is default for product diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index 27ecdaf02..34cfc1a6d 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -82,7 +82,7 @@ def __init__(self, name: str, channel_manager, peer_id, channel_name, level_db_i self.name = name self.__service_status = status_code.Service.online - self.epoch: Epoch = Epoch(self.__blockchain.last_block.header.height + 1 if self.__blockchain.last_block else 1) + self.epoch: Epoch = None @property def channel_name(self): @@ -97,6 +97,13 @@ def service_status(self): else: return "Service is offline: " + status_code.get_status_reason(self.__service_status) + def init_epoch(self): + """Call this after peer list update + + :return: + """ + self.epoch = Epoch(self.__blockchain.last_block.header.height + 1 if self.__blockchain.last_block else 1) + def update_service_status(self, status): self.__service_status = status StubCollection().peer_stub.sync_task().update_status( @@ -669,9 +676,10 @@ def stop(self): def leader_complain(self): complained_leader_id = self.epoch.leader_id - new_leader_id = self.__channel_service.peer_manager.get_next_leader_peer( + new_leader = self.__channel_service.peer_manager.get_next_leader_peer( current_leader_peer_id=self.epoch.leader_id ) + new_leader_id = new_leader.peer_id if new_leader else None if not isinstance(new_leader_id, str): new_leader_id = "" @@ -679,12 +687,22 @@ def leader_complain(self): if not isinstance(complained_leader_id, str): complained_leader_id = "" + self.epoch.add_complain( + complained_leader_id, new_leader_id, self.epoch.height, self.__peer_id, ChannelProperty().group_id + ) + request = loopchain_pb2.ComplainLeaderRequest( complained_leader_id=complained_leader_id, channel=self.channel_name, new_leader_id=new_leader_id, block_height=self.epoch.height, - message="I'm your father.") + message="I'm your father.", + peer_id=self.__peer_id, + group_id=ChannelProperty().group_id + ) + + util.logger.notice(f"complain group_id({ChannelProperty().group_id})") + self.__channel_service.broadcast_scheduler.schedule_broadcast("ComplainLeader", request) def vote_unconfirmed_block(self, block_hash, is_validated): diff --git a/loopchain/peer/consensus_siever.py b/loopchain/peer/consensus_siever.py index f839e4ad3..8d3caf1fc 100644 --- a/loopchain/peer/consensus_siever.py +++ b/loopchain/peer/consensus_siever.py @@ -80,7 +80,8 @@ async def consensus(self): self._made_block_count += 1 peer_manager = ObjectManager().channel_service.peer_manager - next_leader = 
ExternalAddress.fromhex(peer_manager.get_next_leader_peer().peer_id) + next_leader = ExternalAddress.fromhex(peer_manager.get_next_leader_peer( + current_leader_peer_id=ChannelProperty().peer_id).peer_id) else: # util.logger.spam(f"tx count in block({len(block_builder.transactions)})") return self.__block_generation_timer.call() diff --git a/loopchain/peer/peer_outer_service.py b/loopchain/peer/peer_outer_service.py index 6c0db84f2..8e84502bf 100644 --- a/loopchain/peer/peer_outer_service.py +++ b/loopchain/peer/peer_outer_service.py @@ -347,7 +347,10 @@ def ComplainLeader(self, request: ComplainLeaderRequest, context): channel_stub.sync_task().complain_leader( complained_leader_id=request.complained_leader_id, new_leader_id=request.new_leader_id, - block_height=request.block_height) + block_height=request.block_height, + peer_id=request.peer_id, + group_id=request.group_id + ) return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success") diff --git a/loopchain/protos/loopchain.proto b/loopchain/protos/loopchain.proto index 59c8940d0..567e2ada7 100644 --- a/loopchain/protos/loopchain.proto +++ b/loopchain/protos/loopchain.proto @@ -134,6 +134,8 @@ message ComplainLeaderRequest { required string new_leader_id = 3; required int32 block_height = 4; required string message = 5; + required string peer_id = 6; + required string group_id = 7; } From 637cfc1147dbc23b8c293cab4485b3e87dd6302e Mon Sep 17 00:00:00 2001 From: Jiyun Park Date: Sun, 27 Jan 2019 16:06:25 +0900 Subject: [PATCH 19/23] [LC-56] add Makefile for setup --- Makefile | 57 +++++++++++++++ README.md | 206 +++++++++++++++++----------------------------------- run_test.sh | 3 - 3 files changed, 124 insertions(+), 142 deletions(-) create mode 100644 Makefile delete mode 100755 run_test.sh diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..1ca5432e7 --- /dev/null +++ b/Makefile @@ -0,0 +1,57 @@ +requirements: + @command -v automake || echo "Error: automake is not installed." + @command -v pkg-config || echo "Error: pkg-config is not installed." + @command -v libtool || echo "Error: libtool is not installed." + @command -v openssl || echo "Error: openssl is not installed." + @if [ "$$(ps -e | grep '[r]abbitmq-server')" = "" ]; then\ + echo "Rabbitmq server is not running locally.";\ + fi + +install: + pip3 install git+https://github.com/icon-project/icon-service.git@master + pip3 install git+https://github.com/icon-project/icon-commons.git@master + pip3 install git+https://github.com/icon-project/icon-rpc-server.git@master + pip3 install tbears + pip3 install -e . + +setup: generate-proto generate-key + +generate-proto: + @echo "Generating python grpc code from proto into > " `pwd` + python3 -m grpc.tools.protoc -I'./loopchain/protos' --python_out='./loopchain/protos' --grpc_python_out='./loopchain/protos' './loopchain/protos/loopchain.proto' + +generate-key: + @mkdir -p resources/my_pki + @echo "Generating private key...." + openssl ecparam -genkey -name secp256k1 | openssl ec -aes-256-cbc -out ./resources/my_pki/my_private.pem + @echo "" + @echo "Generating public key from private key...." + openssl ec -in ./resources/my_pki/my_private.pem -pubout -out ./resources/my_pki/my_public.pem + +check: + @echo "Check Python & Gunicorn & RabbitMQ Process..." + ps -ef | grep loop + ps -ef | grep gunicorn + rabbitmqctl list_queues + +test: + @python3 -m unittest discover testcase/unittest/ -p "test_*.py" || exit -1 + +clean: clean-mq clean-pyc + +clean-mq: + @echo "Cleaning up RabbitMQ..." 
+ @rabbitmqctl stop_app + @rabbitmqctl reset + @rabbitmqctl start_app + +clean-pyc: + @echo "Clear __pycache__" + find . -name '*.pyc' -exec rm -f {} + + find . -name '*.pyo' -exec rm -f {} + + find . -name '*~' -exec rm -f {} + + +clean-db: + @echo "Cleaning up all DB and logs..." + rm -rf .storage* + rm -rf log/ diff --git a/README.md b/README.md index cf90d7a85..11902b81a 100644 --- a/README.md +++ b/README.md @@ -14,141 +14,64 @@ Loopchain development and execution requires following environments. * Windows are not supported yet. * Python - * Make Virtual Env for Python 3.6.5+ (recommended version, 3.7 is not supported) - * check your python version + * Python 3.6.5+ (recommended version, 3.7 is not supported) - ```bash - $ python3 -V - ``` - - > If you'd like to easily manage your python version by each projects, we highly recommend to use **pyenv**. - * Install **pyenv** - ``` - $ curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash - ``` - - * Append the following commands to `~/.bash_profile`. - ``` - export PATH="/home/centos/.pyenv/bin:$PATH" - eval "$(pyenv init -)" - eval "$(pyenv virtualenv-init -)" - ``` - * Apply for the profile - - ``` - $ source ~/.bash_profile - ``` - * Install **python 3.6.5** - - ``` - $ pyenv install 3.6.5 - $ pyenv shell 3.6.5 - ``` - - * make virtual env and apply - - ``` - $ virtualenv -p python3 ./venv - $ source ./venv/bin/activate - ``` - -### Setup on MacOS - -* install Xcode command tool -* install brew - - ```bash - (venv) $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" - ``` - -* Install third party tools - - ```bash - (venv) $ brew install automake pkg-config libtool leveldb rabbitmq openssl - ``` - -* Setup RabbitMQ - - * increase number of RabbitMQ file descriptors - - > Add the below command to the `rabbitmq-env.conf` file to run the command each time rabbitmq starts. +* Third party tools + + If you're using package manager, you can install all of them through your package manager. + ``` - ulimit -S -n {value: int} + automake pkg-config libtool leveldb rabbitmq openssl ``` - > You may find this file (/usr/local/etc/rabbitmq/rabbitmq-env.conf). - > Recommended value is 2048 or more. (Local test case only) - > You may need to adjust this value depending on your infrastructure environment. - - * start rabbitmq - ```bash - (venv) $ brew services start rabbitmq - (venv) $ rabbitmqctl list_queues - ``` - - * enable rabbitmq web management - - ```bash - (venv) $ rabbitmq-plugins enable rabbitmq_management - ``` - -#### Install requirements - -If you have generated ssh key for github, you can install with below commands. -All libraries are recommended to install the master version of github. +* Check all requirements are installed and started properly ```bash -(venv) $ pip3 install git+ssh://git@github.com/icon-project/icon-service.git@master -(venv) $ pip3 install git+ssh://git@github.com/icon-project/icon-commons.git@master -(venv) $ pip3 install git+ssh://git@github.com/icon-project/icon-rpc-server.git@master -(venv) $ pip3 install -r requirements.txt +$ make requirements ``` -Also, you can install with below commands as well. 
- -```bash -(venv) $ pip3 install git+https://github.com/icon-project/icon-service.git@master -(venv) $ pip3 install git+https://github.com/icon-project/icon-commons.git@master -(venv) $ pip3 install git+https://github.com/icon-project/icon-rpc-server.git@master -(venv) $ pip3 install -r requirements.txt -``` +If you don't see any error logs and you have started rabbitmq server, you may move on to next step. -#### generate gRPC code -This script generates python gRPC code from protocol buffer which is defined in `loopchain.proto`. +### Install necessary packages ```bash -(venv) $ ./generate_code.sh +$ make install ``` #### Run Unittest After installation, run the unittest by following command line in order to check whether it operates well or not. ```bash -(venv) $ ./run_test.sh +$ make test ``` -## Quick Start +## Quick Start on OS X * [Run Citizen Node on ICON Testnet network](#run-citizen-node-on-icon-testnet-network) * [Run Citizen Node on ICON Mainnet network](#run-citizen-node-on-icon-mainnet-network) ### Run Citizen Node on ICON Testnet network -#### Generate Key +* Setup + + ```bash + $ make setup + $ export PW_icon_dex={ENTER_MY_PASSWORD} + $ export REDIRECT_PROTOCOL=https + ``` -```bash -(venv) $ mkdir -p resources/my_pki -(venv) $ openssl ecparam -genkey -name secp256k1 | openssl ec -aes-256-cbc -out ./resources/my_pki/my_private.pem # generate private key -(venv) $ openssl ec -in ./resources/my_pki/my_private.pem -pubout -out ./resources/my_pki/my_public.pem # generate public key -(venv) $ export PW_icon_dex={ENTER_MY_PASSWORD} -(venv) $ export REDIRECT_PROTOCOL=https -``` + This command is for setting up: + * start rabbitmq + * generates python gRPC code from protocol buffer which is defined in `loopchain.proto` + * generates key for citizen node. -This script will enable ICON citizen node on Testnet network, running on port **9000**. +* Run + +This command will enable ICON citizen node on Testnet network, running on port **9000**. Once it's connected to the network, it will start to sync all the blocks on the ICON testnet network. ```bash -(venv) $ ./loop citizen -r testnet +$ loop citizen -r testnet ``` If you want to browse and search the blocks and transactions in ICON Testnet, please go to [ICON testnet tracker](https://trackerdev.icon.foundation). @@ -158,12 +81,6 @@ If you want to browse and search the blocks and transactions in ICON Testnet, pl T-Bears is a suite of development tools for SCORE and provides the command line interface to interact with the ICON network including all the JSON-RPC v3 APIs. For a detailed usage guideline, please refer to [T-Bears tutorial](https://github.com/icon-project/t-bears). -* Install t-bears - -```bash -(venv) $ pip3 install tbears -``` - ##### Test jsonRPC APIs In v3, parameters in all api request params require the string '0x' at the front. @@ -175,7 +92,7 @@ This method returns the last block the Citizen node has currently synced. 
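As a cross-check on the `tbears lastblock` usage below, the same query can be issued as a raw JSON-RPC v3 request. This is only a minimal sketch, assuming the `requests` package is available and the node listens on the default citizen endpoint mentioned in this README:

```python
# Hedged sketch: fetch the last synced block from a local citizen node via JSON-RPC v3.
# Assumes the `requests` package and the default endpoint http://localhost:9000/api/v3.
import requests

CITIZEN_URI = "http://localhost:9000/api/v3"

payload = {"jsonrpc": "2.0", "method": "icx_getLastBlock", "id": 1}
response = requests.post(CITIZEN_URI, json=payload, timeout=10)
response.raise_for_status()

result = response.json().get("result", {})
print("last synced height:", result.get("height"))
```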
usage: tbears lastblock [-h] [-u URI] [-c CONFIG] // Example -(venv) $ tbears lastblock // Example (default uri: http://localhost:9000/api/v3) +$ tbears lastblock // Example (default uri: http://localhost:9000/api/v3) // result block info : { @@ -214,7 +131,7 @@ block info : { usage: tbears blockbyheight [-h] [-u URI] [-c CONFIG] height // Example -(venv) $ tbears blockbyheight 0x1 +$ tbears blockbyheight 0x1 // result block info : { @@ -251,7 +168,7 @@ usage: tbears blockbyhash [-h] [-u URI] [-c CONFIG] hash // Example -(venv) $ tbears blockbyhash 0xce00facd0ac3832e1e6e623d8f4b9344782da881e55abb48d1494fde9e465f78 +$ tbears blockbyhash 0xce00facd0ac3832e1e6e623d8f4b9344782da881e55abb48d1494fde9e465f78 // Result is same as above. ``` @@ -262,7 +179,7 @@ usage: tbears blockbyhash [-h] [-u URI] [-c CONFIG] hash usage: tbears totalsupply [-h] [-u URI] [-c CONFIG] // Example -(venv) $ tbears totalsupply +$ tbears totalsupply // Result Total supply of ICX in hex: 0x2961fff8ca4a62327800000 @@ -277,7 +194,7 @@ Create a keystore file in the given path. Generate a private and public key pair usage: tbears keystore [-h] [-p PASSWORD] path // Example -(venv) $ tbears keystore ./my_keystore.json +$ tbears keystore ./my_keystore.json input your keystore password: (You have to initialize your keystore password) @@ -323,7 +240,7 @@ If you want to load and view your testnet account on ICONex Chrome extension, pl usage: tbears balance [-h] [-u URI] [-c CONFIG] address // Example -(venv) $ tbears balance hx63499c4efc26c9370f6d68132c116d180d441266 +$ tbears balance hx63499c4efc26c9370f6d68132c116d180d441266 // Result balance in hex: {your balance in hex} @@ -355,7 +272,7 @@ optional arguments: value for the "uri" (default: ./tbears_cli_config.json) ``` -We provide the minimal settings for the simple coin transfer in the `sendtx_testnet.json` file. +We provided the minimal settings for the simple coin transfer in the `sendtx_testnet.json` file. The address to which icx is sent(`to`) is the address the ICON developers usually use when testing. You can change the address and the value if you want. ```json // sendtx_testnet.json @@ -377,33 +294,38 @@ The address to which icx is sent(`to`) is the address the ICON developers usuall Example ```bash -(venv) $ tbears sendtx -k my_keystore.json sendtx_testnet.json +$ tbears sendtx -k my_keystore.json sendtx_testnet.json input your keystore password: Send transaction request successfully. -transaction hash: 0xc8a3e3f77f21f8f1177d829cbc4c0ded6fd064cc8e42ef309dacff5c0a952289 +transaction hash: {your tx hash} ``` For the details, please go to [Command-line Interfaces(CLIs)](https://github.com/icon-project/t-bears#command-line-interfacesclis) chapter in t-bears repository. ### Run Citizen Node on ICON Mainnet network -#### Generate Key +* Setup + + ```bash + $ make setup + $ export PW_icon_dex={ENTER_MY_PASSWORD} + $ export REDIRECT_PROTOCOL=https + ``` -```bash -(venv) $ mkdir -p resources/my_pki -(venv) $ openssl ecparam -genkey -name secp256k1 | openssl ec -aes-256-cbc -out ./resources/my_pki/my_private.pem # generate private key -(venv) $ openssl ec -in ./resources/my_pki/my_private.pem -pubout -out ./resources/my_pki/my_public.pem # generate public key -(venv) $ export PW_icon_dex={ENTER_MY_PASSWORD} -(venv) $ export REDIRECT_PROTOCOL=https -``` + This command is for setting up: + * start rabbitmq + * generates python gRPC code from protocol buffer which is defined in `loopchain.proto` + * generates key for citizen node. 
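Before starting the node, the generated keypair can be sanity-checked from Python. A minimal sketch, assuming the `cryptography` package (2.x-era API) and the key paths used by the Makefile's `generate-key` target:

```python
# Hedged sketch: confirm the key generated by `make setup` unlocks with PW_icon_dex.
# Assumes the `cryptography` package; paths follow the Makefile's generate-key target.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

password = os.environ["PW_icon_dex"].encode()

with open("resources/my_pki/my_private.pem", "rb") as key_file:
    private_key = serialization.load_pem_private_key(
        key_file.read(), password=password, backend=default_backend())

public_pem = private_key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
print(public_pem.decode())  # should match resources/my_pki/my_public.pem
```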
-This script will enable ICON citizen node on Mainnet network, running on port **9100**. +* Run + +This command below will enable ICON citizen node on Mainnet network, running on port **9100**. Once it's connected to the network, it will start to sync all the blocks on the ICON mainnet network. ```bash -(venv) $ ./loop citizen -r mainnet +$ loop citizen -r mainnet ``` If you want to browse and search the blocks and transactions in ICON Mainnet, please go to [ICON tracker](https://tracker.icon.foundation). @@ -423,7 +345,7 @@ This method returns the last block the Citizen node has currently synced. usage: tbears lastblock [-h] [-u URI] [-c CONFIG] // Example -(venv) $ tbears lastblock -u http://127.0.0.1:9100/api/v3 +$ tbears lastblock -u http://127.0.0.1:9100/api/v3 // result block info : { @@ -462,7 +384,7 @@ block info : { usage: tbears blockbyheight [-h] [-u URI] [-c CONFIG] height // Example -(venv) $ tbears blockbyheight -u http://127.0.0.1:9100/api/v3 0x1 +$ tbears blockbyheight -u http://127.0.0.1:9100/api/v3 0x1 // result block info : { @@ -498,8 +420,7 @@ block info : { usage: tbears blockbyhash [-h] [-u URI] [-c CONFIG] hash // Example - -(venv) $ tbears blockbyhash -u http://127.0.0.1:9100/api/v3 0xce00facd0ac3832e1e6e623d8f4b9344782da881e55abb48d1494fde9e465f78 +$ tbears blockbyhash -u http://127.0.0.1:9100/api/v3 0xce00facd0ac3832e1e6e623d8f4b9344782da881e55abb48d1494fde9e465f78 // Result is same as above. ``` @@ -510,7 +431,7 @@ usage: tbears blockbyhash [-h] [-u URI] [-c CONFIG] hash usage: tbears totalsupply [-h] [-u URI] [-c CONFIG] // Example -(venv) $ tbears totalsupply -u http://127.0.0.1:9100/api/v3 +$ tbears totalsupply -u http://127.0.0.1:9100/api/v3 // Result Total supply of ICX in hex: 0x2961fff8ca4a62327800000 @@ -532,7 +453,7 @@ Total supply of ICX in decimal: 800460000000000000000000000 usage: tbears balance [-h] [-u URI] [-c CONFIG] address // Example -(venv) $ tbears balance -u http://127.0.0.1:9100/api/v3 hx63499c4efc26c9370f6d68132c116d180d441266 +$ tbears balance -u http://127.0.0.1:9100/api/v3 hx63499c4efc26c9370f6d68132c116d180d441266 // Result balance in hex: {your balance in hex} @@ -566,11 +487,18 @@ optional arguments: For the details, please go to [Command-line Interfaces(CLIs)](https://github.com/icon-project/t-bears#command-line-interfacesclis) chapter in t-bears repository. -#### Clean Up (delete log / delete DB) +#### Clean Up +* clear rabbitMQ processes & pycache + +```bash +$ make clean ``` -$ rm -rf log/ -$ rm -rf .storage_test/ .storage_main/ + +* delete log / delete DB + +```bash +$ make clean-db ``` ## License diff --git a/run_test.sh b/run_test.sh deleted file mode 100755 index e12a5fce1..000000000 --- a/run_test.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -python3 -m unittest discover testcase/unittest/ -p "test_*.py" || exit -1 -#python3 -m unittest -q testcase.unittest.test_peer.TestPeer.test_query From b623f877b4de16942198f3be6fb0aa6f73ad0689 Mon Sep 17 00:00:00 2001 From: winDy Date: Mon, 28 Jan 2019 08:16:52 +0900 Subject: [PATCH 20/23] If Peer restart after complain, Peer have to subscribe to other peers. --- docs/1. 
specification/leader_complain.md | 13 +++++++++++++ loopchain/baseservice/peer_manager.py | 2 +- loopchain/blockchain/epoch.py | 3 +-- loopchain/channel/channel_inner_service.py | 6 ++++-- loopchain/channel/channel_service.py | 15 +++++++++++++++ loopchain/channel/channel_statemachine.py | 6 +++--- loopchain/peer/block_manager.py | 4 +++- loopchain/peer/peer_outer_service.py | 5 +++-- loopchain/protos/loopchain.proto | 2 +- loopchain/utils/loggers/configuration_presets.py | 2 +- 10 files changed, 45 insertions(+), 13 deletions(-) diff --git a/docs/1. specification/leader_complain.md b/docs/1. specification/leader_complain.md index 36c7c2c49..4eeb1c06d 100644 --- a/docs/1. specification/leader_complain.md +++ b/docs/1. specification/leader_complain.md @@ -11,6 +11,19 @@ * Strategy - leader pick strategy in round n. (normal strategy) - pick most picked peer in prev round. + + +#### Terms (new) + * Height + - Increase after add block when consensus complete. + * Epoch + - Candidate blocks, voting and leader complaint management. Handles one block height. And after adding blocks, the new epoch starts. + * Round + - round 0 is first block generation try of each height. + - If leader fail to make a block, round n is to be leader complain round. + * Strategy + - leader pick strategy in round n. (normal strategy) + - pick most picked peer in prev round. #### Complain Process * heartbeat diff --git a/loopchain/baseservice/peer_manager.py b/loopchain/baseservice/peer_manager.py index b5a7a7a53..ce6cdf142 100644 --- a/loopchain/baseservice/peer_manager.py +++ b/loopchain/baseservice/peer_manager.py @@ -161,7 +161,7 @@ def convert_peer_info_item_to_peer_item(item): map(convert_peer_info_item_to_peer_item, self.peer_list_data.peer_info_list[group_id].items()) ) - def __get_peer_by_target(self, peer_target): + def get_peer_by_target(self, peer_target): for group_id in self.peer_list.keys(): for peer_id in self.peer_list[group_id]: peer_each = self.peer_list[group_id][peer_id] diff --git a/loopchain/blockchain/epoch.py b/loopchain/blockchain/epoch.py index 6f9df021a..ea5c70753 100644 --- a/loopchain/blockchain/epoch.py +++ b/loopchain/blockchain/epoch.py @@ -36,8 +36,7 @@ def __init__(self, height: int, leader_id=None): @staticmethod def new_epoch(height: int, leader_id=None): - if leader_id is None and ObjectManager().channel_service.block_manager.epoch.leader_id: - leader_id = ObjectManager().channel_service.block_manager.epoch.leader_id + leader_id = leader_id or ObjectManager().channel_service.block_manager.epoch.leader_id return Epoch(height, leader_id) def set_epoch_leader(self, leader_id): diff --git a/loopchain/channel/channel_inner_service.py b/loopchain/channel/channel_inner_service.py index 6c57a6d51..806877b46 100644 --- a/loopchain/channel/channel_inner_service.py +++ b/loopchain/channel/channel_inner_service.py @@ -28,7 +28,6 @@ BlockSerializer, blocks, Hash32) from loopchain.blockchain.exception import * from loopchain.channel.channel_property import ChannelProperty -from loopchain.peer.consensus_siever import ConsensusSiever from loopchain.protos import loopchain_pb2, message_code if TYPE_CHECKING: @@ -393,6 +392,9 @@ def block_height_sync(self): @message_queue_task(type_=MessageQueueType.Worker) def add_audience(self, peer_target) -> None: + peer = self._channel_service.peer_manager.get_peer_by_target(peer_target) + if not peer: + util.logger.debug(f"There is no peer peer_target({peer_target})") self._channel_service.broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE, 
peer_target) @message_queue_task(type_=MessageQueueType.Worker) @@ -454,7 +456,7 @@ def complain_leader(self, complained_leader_id, new_leader_id, block_height, pee next_new_leader = block_manager.epoch.complain_result() if next_new_leader: - self._channel_service.peer_manager.remove_peer(complained_leader_id) + # self._channel_service.peer_manager.remove_peer(complained_leader_id) self._channel_service.stop_leader_complain_timer() if next_new_leader == ChannelProperty().peer_id: # Turn to Leader and Send Leader Complain Block diff --git a/loopchain/channel/channel_service.py b/loopchain/channel/channel_service.py index 8cfb5c604..602333b34 100644 --- a/loopchain/channel/channel_service.py +++ b/loopchain/channel/channel_service.py @@ -218,6 +218,10 @@ async def init(self, peer_port, peer_target, rest_target, radio_station_target, self.connect_to_radio_station() else: await self.__load_peers_from_file() + # subscribe to other peers + self.__subscribe_to_peer_list() + # broadcast AnnounceNewPeer to other peers + # If allow broadcast AnnounceNewPeer here, complained peer can be leader again. else: self.__init_node_subscriber() @@ -457,6 +461,17 @@ def connect_to_radio_station(self, is_reconnect=False): if each_peer.status == PeerStatus.connected: self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE, each_peer.target) + def __subscribe_to_peer_list(self): + peer_object = self.peer_manager.get_peer(ChannelProperty().peer_id) + peer_request = loopchain_pb2.PeerRequest( + channel=ChannelProperty().name, + peer_target=ChannelProperty().peer_target, + peer_id=ChannelProperty().peer_id, group_id=ChannelProperty().group_id, + node_type=ChannelProperty().node_type, + peer_order=peer_object.order + ) + self.__broadcast_scheduler.schedule_broadcast("Subscribe", peer_request) + async def subscribe_to_radio_station(self): await self.__subscribe_call_to_stub(self.__radio_station_stub, loopchain_pb2.PEER) diff --git a/loopchain/channel/channel_statemachine.py b/loopchain/channel/channel_statemachine.py index df20489e7..f17f6add4 100644 --- a/loopchain/channel/channel_statemachine.py +++ b/loopchain/channel/channel_statemachine.py @@ -153,7 +153,7 @@ def _vote_on_enter(self): loggers.get_preset().update_logger() def _vote_on_exit(self): - # util.logger.notice(f"_vote_on_exit") + # util.logger.debug(f"_vote_on_exit") pass def _blockgenerate_on_enter(self): @@ -165,10 +165,10 @@ def _blockgenerate_on_exit(self): self.__channel_service.block_manager.stop_block_generate_timer() def _leadercomplain_on_enter(self): - util.logger.notice(f"_leadercomplain_on_enter") + util.logger.debug(f"_leadercomplain_on_enter") self.__channel_service.block_manager.leader_complain() def _leadercomplain_on_exit(self): - util.logger.notice(f"_leadercomplain_on_exit") + util.logger.debug(f"_leadercomplain_on_exit") # } diff --git a/loopchain/peer/block_manager.py b/loopchain/peer/block_manager.py index 34cfc1a6d..285371d22 100644 --- a/loopchain/peer/block_manager.py +++ b/loopchain/peer/block_manager.py @@ -270,6 +270,8 @@ def add_unconfirmed_block(self, unconfirmed_block): if unconfirmed_block.body.confirm_prev_block: self.confirm_prev_block(unconfirmed_block) + self.epoch.set_epoch_leader(unconfirmed_block.header.next_leader.hex_hx()) + self.__unconfirmedBlockQueue.put(unconfirmed_block) def add_confirmed_block(self, confirmed_block: Block): @@ -701,7 +703,7 @@ def leader_complain(self): group_id=ChannelProperty().group_id ) - util.logger.notice(f"complain group_id({ChannelProperty().group_id})") + 
util.logger.debug(f"complain group_id({ChannelProperty().group_id})") self.__channel_service.broadcast_scheduler.schedule_broadcast("ComplainLeader", request) diff --git a/loopchain/peer/peer_outer_service.py b/loopchain/peer/peer_outer_service.py index 8e84502bf..0b4afcdbf 100644 --- a/loopchain/peer/peer_outer_service.py +++ b/loopchain/peer/peer_outer_service.py @@ -570,8 +570,9 @@ def Subscribe(self, request, context): if (request.peer_target in peer_list and conf.ENABLE_CHANNEL_AUTH) or \ (request.node_type == loopchain_pb2.CommunityNode and not conf.ENABLE_CHANNEL_AUTH): channel_stub.sync_task().add_audience(peer_target=request.peer_target) - util.logger.spam(f"peer_outer_service::Subscribe add_audience " - f"target({request.peer_target}) in channel({request.channel})") + util.logger.debug(f"peer_outer_service::Subscribe add_audience " + f"target({request.peer_target}) in channel({request.channel}), " + f"order({request.peer_order})") else: logging.error(f"This target({request.peer_target}, {request.node_type}) failed to subscribe.") return loopchain_pb2.CommonReply(response_code=message_code.get_response_code(message_code.Response.fail), diff --git a/loopchain/protos/loopchain.proto b/loopchain/protos/loopchain.proto index 567e2ada7..7270a6850 100644 --- a/loopchain/protos/loopchain.proto +++ b/loopchain/protos/loopchain.proto @@ -359,7 +359,7 @@ message PeerRequest { optional string channel = 2; required string peer_target = 3; required string group_id = 4; - required PeerType peer_type = 5; + optional PeerType peer_type = 5; optional int32 peer_order = 6; optional bytes peer_object = 7; optional NodeType node_type = 8; diff --git a/loopchain/utils/loggers/configuration_presets.py b/loopchain/utils/loggers/configuration_presets.py index 04ba60dec..5f8970b53 100644 --- a/loopchain/utils/loggers/configuration_presets.py +++ b/loopchain/utils/loggers/configuration_presets.py @@ -72,7 +72,7 @@ def update_preset(update_logger=True): preset.log_monitor_port = conf.MONITOR_LOG_PORT if preset is develop: - preset.log_level = verboselogs.NOTICE + preset.log_level = verboselogs.SPAM else: preset.log_level = conf.LOOPCHAIN_LOG_LEVEL From fcad0e5574c3d88573fd4d3bab8183ee944a7336 Mon Sep 17 00:00:00 2001 From: "Gyeong-Rok.Lee" Date: Tue, 29 Jan 2019 16:54:12 +0900 Subject: [PATCH 21/23] [LC-129] Change max tx size for AddTxList from 10 to 32 Signed-off-by: Gyeong-Rok.Lee --- loopchain/configure_default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loopchain/configure_default.py b/loopchain/configure_default.py index f7e8a3f2f..011573221 100644 --- a/loopchain/configure_default.py +++ b/loopchain/configure_default.py @@ -164,7 +164,7 @@ class ConsensusAlgorithm(IntEnum): MAX_BLOCK_KBYTES = 3000 # default: 3000 # The total size of the transactions in a block. MAX_TX_SIZE_IN_BLOCK = 1 * 1024 * 1024 # 1 MB is better than 2 MB (because tx invoke need CPU time) -MAX_TX_COUNT_IN_ADDTX_LIST = 10 # AddTxList can send multiple tx in one message. +MAX_TX_COUNT_IN_ADDTX_LIST = 32 # AddTxList can send multiple tx in one message. SEND_TX_LIST_DURATION = 0.3 # seconds USE_ZIPPED_DUMPS = True # Rolling update does not work if this option is different from the running node. 
# 블럭이 합의 되는 투표율 1 = 100%, 0.5 = 50% From b1006f42d1959cd6df13d21ef4a9430d69050f16 Mon Sep 17 00:00:00 2001 From: Jiyun Park Date: Tue, 29 Jan 2019 16:39:58 +0900 Subject: [PATCH 22/23] rollback run_test.sh for CI test --- Makefile | 8 ++++---- README.md | 6 +++--- run_test.sh | 5 +++++ 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100755 run_test.sh diff --git a/Makefile b/Makefile index 1ca5432e7..db8da65ca 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ requirements: - @command -v automake || echo "Error: automake is not installed." - @command -v pkg-config || echo "Error: pkg-config is not installed." - @command -v libtool || echo "Error: libtool is not installed." - @command -v openssl || echo "Error: openssl is not installed." + @command -v automake > /dev/null || echo "Error: automake is not installed." + @command -v pkg-config > /dev/null || echo "Error: pkg-config is not installed." + @command -v libtool > /dev/null || echo "Error: libtool is not installed." + @command -v openssl > /dev/null || echo "Error: openssl is not installed." @if [ "$$(ps -e | grep '[r]abbitmq-server')" = "" ]; then\ echo "Rabbitmq server is not running locally.";\ fi diff --git a/README.md b/README.md index 11902b81a..51666f7fd 100644 --- a/README.md +++ b/README.md @@ -26,9 +26,9 @@ Loopchain development and execution requires following environments. * Check all requirements are installed and started properly -```bash -$ make requirements -``` + ```bash + $ make requirements + ``` If you don't see any error logs and you have started rabbitmq server, you may move on to next step. diff --git a/run_test.sh b/run_test.sh new file mode 100755 index 000000000..1694b473b --- /dev/null +++ b/run_test.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# This file will be deleted after changing the test method in CI. + +python3 -m unittest discover testcase/unittest/ -p "test_*.py" || exit -1 +#python3 -m unittest -q testcase.unittest.test_peer.TestPeer.test_query \ No newline at end of file From 769fe554422f14563cf762c9d3d2dfb3b31b318b Mon Sep 17 00:00:00 2001 From: windies21 Date: Tue, 29 Jan 2019 17:28:58 +0900 Subject: [PATCH 23/23] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index e4a973f91..227cea215 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.24.2 +2.0.0
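A closing note on the restored `run_test.sh` and the Makefile `test` target: both drive `python3 -m unittest discover testcase/unittest/ -p "test_*.py"`. The same discovery can also be run programmatically; a minimal sketch, assuming it is launched from the repository root and mirroring the script's non-zero exit on failure:

```python
# Hedged sketch: programmatic equivalent of run_test.sh / `make test`.
# Assumes execution from the repository root so testcase/unittest/ resolves.
import sys
import unittest

suite = unittest.TestLoader().discover("testcase/unittest/", pattern="test_*.py")
result = unittest.TextTestRunner(verbosity=1).run(suite)

# run_test.sh exits non-zero when any test fails; do the same here.
sys.exit(0 if result.wasSuccessful() else 1)
```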