diff --git a/contribs/gnobro/go.mod b/contribs/gnobro/go.mod index 436e7968553..3765875629a 100644 --- a/contribs/gnobro/go.mod +++ b/contribs/gnobro/go.mod @@ -60,6 +60,7 @@ require ( github.com/emicklei/dot v1.6.2 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -85,6 +86,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.16.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect diff --git a/contribs/gnobro/go.sum b/contribs/gnobro/go.sum index f1a2876ae25..8f4c1be0cf4 100644 --- a/contribs/gnobro/go.sum +++ b/contribs/gnobro/go.sum @@ -140,6 +140,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= @@ -239,6 +241,8 @@ github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= diff --git a/contribs/gnodev/go.mod b/contribs/gnodev/go.mod index c1850d1ff7b..8cce943a905 100644 --- a/contribs/gnodev/go.mod +++ b/contribs/gnodev/go.mod @@ -46,6 +46,7 @@ require ( github.com/dlclark/regexp2 v1.11.4 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -64,6 +65,7 @@ require ( github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect 
diff --git a/contribs/gnodev/go.sum b/contribs/gnodev/go.sum index 7a983abcfab..e556f94bf4a 100644 --- a/contribs/gnodev/go.sum +++ b/contribs/gnodev/go.sum @@ -110,6 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= @@ -191,6 +193,8 @@ github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnogenesis/go.mod b/contribs/gnogenesis/go.mod index 35560ca4ac9..38e8b55091f 100644 --- a/contribs/gnogenesis/go.mod +++ b/contribs/gnogenesis/go.mod @@ -29,6 +29,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -45,6 +46,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnogenesis/go.sum b/contribs/gnogenesis/go.sum index 4a62c7e0277..eaa3bbc3bd4 100644 --- a/contribs/gnogenesis/go.sum +++ b/contribs/gnogenesis/go.sum @@ -80,6 +80,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -142,6 +144,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnohealth/go.mod b/contribs/gnohealth/go.mod index 243008a367e..41a3d41f7e0 100644 --- a/contribs/gnohealth/go.mod +++ b/contribs/gnohealth/go.mod @@ -19,6 +19,7 @@ require ( github.com/cosmos/ics23/go v0.11.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/go-cmp v0.7.0 // indirect @@ -26,6 +27,7 @@ require ( github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.23.0 // indirect diff --git a/contribs/gnohealth/go.sum b/contribs/gnohealth/go.sum index abe1d921e72..f6a372e931b 100644 --- a/contribs/gnohealth/go.sum +++ b/contribs/gnohealth/go.sum @@ -69,6 +69,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -117,6 +119,8 @@ github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QT github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnokeykc/go.mod b/contribs/gnokeykc/go.mod index 0d35985058a..a42c220f28c 100644 --- a/contribs/gnokeykc/go.mod +++ b/contribs/gnokeykc/go.mod @@ -27,6 +27,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -39,6 +40,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnokeykc/go.sum b/contribs/gnokeykc/go.sum index cc01ad5cf47..3c63a13778c 100644 --- a/contribs/gnokeykc/go.sum +++ b/contribs/gnokeykc/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -140,6 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnomigrate/go.mod b/contribs/gnomigrate/go.mod index 2a7b6bfc91a..27163d9ec6f 100644 --- a/contribs/gnomigrate/go.mod +++ b/contribs/gnomigrate/go.mod @@ -30,6 +30,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -46,6 +47,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 
v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnomigrate/go.sum b/contribs/gnomigrate/go.sum index ef729bdf524..6ea28429b23 100644 --- a/contribs/gnomigrate/go.sum +++ b/contribs/gnomigrate/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -143,6 +145,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/tx-archive/go.mod b/contribs/tx-archive/go.mod index 7fadbd287f4..9b04ffdbde0 100644 --- a/contribs/tx-archive/go.mod +++ b/contribs/tx-archive/go.mod @@ -33,6 +33,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -49,6 +50,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/contribs/tx-archive/go.sum b/contribs/tx-archive/go.sum index 5a19e44339e..4e3c5fb7e92 100644 --- a/contribs/tx-archive/go.sum +++ b/contribs/tx-archive/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 
h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -144,6 +146,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/go.mod b/go.mod index 5037dd1db7b..a58677e6d20 100644 --- a/go.mod +++ b/go.mod @@ -18,12 +18,15 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/emicklei/dot v1.6.2 github.com/fortytw2/leaktest v1.3.0 + github.com/go-chi/chi/v5 v5.2.3 github.com/gofrs/flock v0.12.1 github.com/golang/mock v1.6.0 github.com/google/gofuzz v1.2.0 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/libp2p/go-buffer-pool v0.1.0 + github.com/olahol/melody v1.4.0 github.com/pelletier/go-toml v1.9.5 github.com/peterbourgon/ff/v3 v3.4.0 github.com/pmezard/go-difflib v1.0.0 @@ -76,7 +79,6 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/klauspost/compress v1.16.0 // indirect github.com/kr/pretty v0.3.1 // indirect diff --git a/go.sum b/go.sum index ab39e34afbb..774d4a0693a 100644 --- a/go.sum +++ b/go.sum @@ -94,6 +94,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -158,6 +160,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/autocounterd/go.mod b/misc/autocounterd/go.mod index 3b1562b37ab..e9838b1133e 100644 --- a/misc/autocounterd/go.mod +++ b/misc/autocounterd/go.mod @@ -15,6 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -26,6 +27,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/misc/autocounterd/go.sum b/misc/autocounterd/go.sum index c34609d099f..e93fec85f9a 100644 --- a/misc/autocounterd/go.sum +++ b/misc/autocounterd/go.sum @@ -77,6 +77,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -134,6 +136,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/loop/go.mod b/misc/loop/go.mod index 380edbef266..76a0b9a0495 100644 --- a/misc/loop/go.mod +++ b/misc/loop/go.mod @@ -40,6 +40,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/getsentry/sentry-go v0.35.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -57,6 +58,7 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + 
github.com/olahol/melody v1.4.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect diff --git a/misc/loop/go.sum b/misc/loop/go.sum index c16d1a479a7..e3dadedb750 100644 --- a/misc/loop/go.sum +++ b/misc/loop/go.sum @@ -99,6 +99,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/getsentry/sentry-go v0.35.0 h1:+FJNlnjJsZMG3g0/rmmP7GiKjQoUF5EXfEtBwtPtkzY= github.com/getsentry/sentry-go v0.35.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -165,6 +167,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/stress-test/stress-test-many-posts/go.mod b/misc/stress-test/stress-test-many-posts/go.mod index dd26f99c422..228c7724c72 100644 --- a/misc/stress-test/stress-test-many-posts/go.mod +++ b/misc/stress-test/stress-test-many-posts/go.mod @@ -23,6 +23,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/gnolang/gno v0.1.2-0.20240826090356-651f5aac3706 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -34,6 +35,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/misc/stress-test/stress-test-many-posts/go.sum b/misc/stress-test/stress-test-many-posts/go.sum index 319505bff4a..195f106c296 100644 --- a/misc/stress-test/stress-test-many-posts/go.sum +++ b/misc/stress-test/stress-test-many-posts/go.sum @@ -86,6 +86,8 @@ github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/gnolang/gnonative/v4 v4.2.2 h1:MxhXQBapoWM42llE5IrU6IM743AKKXQAimFMEdrJIUI= github.com/gnolang/gnonative/v4 v4.2.2/go.mod 
h1:78NvbayMU0oV1yYLfQEQpVfLlLFWlADIuFhBgYhZPSk= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -146,6 +148,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/tm2/pkg/bft/rpc/core/abci.go b/tm2/pkg/bft/rpc/core/abci.go deleted file mode 100644 index aef90052f58..00000000000 --- a/tm2/pkg/bft/rpc/core/abci.go +++ /dev/null @@ -1,112 +0,0 @@ -package core - -import ( - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Query the application for some information. -// -// ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.ABCIQuery("", "abcd", true) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "response": { -// "log": "exists", -// "height": "0", -// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", -// "value": "61626364", -// "key": "61626364", -// "index": "-1", -// "code": "0" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+------------------------------------------------| -// | path | string | false | false | Path to the data ("/a/b/c") | -// | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | prove | bool | false | false | Includes proof if true | -func ABCIQuery(ctx *rpctypes.Context, path string, data []byte, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { - resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ - Path: path, - Data: data, - Height: height, - Prove: prove, - }) - if err != nil { - return nil, err - } - logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{Response: resQuery}, nil -} - -// Get some info about the application. 
-//
-// ```shell
-// curl 'localhost:26657/abci_info'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// err := client.Start()
-//
-// if err != nil {
-//   // handle error
-// }
-//
-// defer client.Stop()
-// info, err := client.ABCIInfo()
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-//
-// {
-//   "error": "",
-//   "result": {
-//     "response": {
-//       "data": "{\"size\":3}"
-//     }
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
-// }
-//
-// ```
-func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
-	resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{})
-	if err != nil {
-		return nil, err
-	}
-	return &ctypes.ResultABCIInfo{Response: resInfo}, nil
-}
diff --git a/tm2/pkg/bft/rpc/core/abci/abci.go b/tm2/pkg/bft/rpc/core/abci/abci.go
new file mode 100644
index 00000000000..d8ea630c703
--- /dev/null
+++ b/tm2/pkg/bft/rpc/core/abci/abci.go
@@ -0,0 +1,87 @@
+package abci
+
+import (
+	abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
+	"github.com/gnolang/gno/tm2/pkg/bft/appconn"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params"
+	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
+)
+
+// Handler is the ABCI RPC handler
+type Handler struct {
+	proxyAppQuery appconn.Query
+}
+
+// NewHandler creates a new instance of the ABCI RPC handler
+func NewHandler(proxyAppQuery appconn.Query) *Handler {
+	return &Handler{
+		proxyAppQuery: proxyAppQuery,
+	}
+}
+
+// QueryHandler queries the application (synchronously) for some information
+//
+// Params:
+// - path string (optional, default "")
+// - data []byte (required)
+// - height int64 (optional, default 0)
+// - prove bool (optional, default false)
+func (h *Handler) QueryHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	const (
+		idxPath   = 0
+		idxData   = 1
+		idxHeight = 2
+		idxProve  = 3
+	)
+
+	path, err := params.AsString(p, idxPath)
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := params.AsBytes(p, idxData, true)
+	if err != nil {
+		return nil, err
+	}
+
+	height, err := params.AsInt64(p, idxHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	prove, err := params.AsBool(p, idxProve)
+	if err != nil {
+		return nil, err
+	}
+
+	resQuery, queryErr := h.proxyAppQuery.QuerySync(abci.RequestQuery{
+		Path:   path,
+		Data:   data,
+		Height: height,
+		Prove:  prove,
+	})
+	if queryErr != nil {
+		return nil, spec.GenerateResponseError(queryErr)
+	}
+
+	return &ctypes.ResultABCIQuery{Response: resQuery}, nil
+}
+
+// InfoHandler gets some info about the application.
+//
+// No params
+func (h *Handler) InfoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	// Make sure there are no params
+	if len(p) > 0 {
+		return nil, spec.GenerateInvalidParamError(1)
+	}
+
+	resInfo, err := h.proxyAppQuery.InfoSync(abci.RequestInfo{})
+	if err != nil {
+		return nil, spec.GenerateResponseError(err)
+	}
+
+	return &ctypes.ResultABCIInfo{Response: resInfo}, nil
+}
diff --git a/tm2/pkg/bft/rpc/core/abci/abci_test.go b/tm2/pkg/bft/rpc/core/abci/abci_test.go
new file mode 100644
index 00000000000..83b6501710a
--- /dev/null
+++ b/tm2/pkg/bft/rpc/core/abci/abci_test.go
@@ -0,0 +1,239 @@
+package abci
+
+import (
+	"errors"
+	"testing"
+
+	abciTypes "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
+	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestHandler_QueryHandler(t *testing.T) {
+	t.Parallel()
+
+	t.Run("Missing data param", func(t *testing.T) {
+		t.Parallel()
+
+		var (
+			mockQuery = &mockQuery{
+				querySyncFn: func(_ abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) {
+					t.FailNow()
+
+					return abciTypes.ResponseQuery{}, nil
+				},
+			}
+
+			params = []any{
+				"some/path",
+			}
+		)
+
+		h := NewHandler(mockQuery)
+
+		res, err := h.QueryHandler(nil, params)
+		require.Nil(t, res)
+		require.NotNil(t, err)
+
+		assert.Equal(t, spec.InvalidParamsErrorCode, err.Code)
+	})
+
+	t.Run("Query sync error", func(t *testing.T) {
+		t.Parallel()
+
+		var (
+			queryErr = errors.New("app query error")
+			params   = []any{
+				"some/path",
+				[]byte("data"),
+			}
+
+			mockQuery = &mockQuery{
+				querySyncFn: func(_ abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) {
+					return abciTypes.ResponseQuery{}, queryErr
+				},
+			}
+		)
+
+		h := NewHandler(mockQuery)
+
+		res, err := h.QueryHandler(nil, params)
+		require.Nil(t, res)
+		require.NotNil(t, err)
+
+		assert.Equal(t, spec.ServerErrorCode, err.Code)
+		assert.Contains(t, err.Message, queryErr.Error())
+	})
+
+	t.Run("Valid query", func(t *testing.T) {
+		t.Parallel()
+
+		var (
+			height           = int64(10)
+			expectedResponse = abciTypes.ResponseQuery{
+				Height: height,
+			}
+
+			params = []any{
+				"some/path",       // path
+				[]byte("payload"), // data
+				height,            // height
+				true,              // prove
+			}
+
+			expectedRequest = abciTypes.RequestQuery{
+				Path:   "some/path",
+				Data:   []byte("payload"),
+				Height: 10,
+				Prove:  true,
+			}
+
+			mockQuery = &mockQuery{
+				querySyncFn: func(req abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) {
+					assert.Equal(t, expectedRequest, req)
+
+					return expectedResponse, nil
+				},
+			}
+		)
+
+		h := NewHandler(mockQuery)
+
+		res, err := h.QueryHandler(nil, params)
+		require.Nil(t, err)
+		require.NotNil(t, res)
+
+		result, ok := res.(*ctypes.ResultABCIQuery)
+		require.True(t, ok)
+
+		assert.Equal(t, expectedResponse, result.Response)
+	})
+
+	t.Run("Valid query with defaults", func(t *testing.T) {
+		t.Parallel()
+
+		var (
+			params = []any{
+				// path="", height=0, prove=false defaults
+				nil,
+				[]byte("data-only"),
+			}
+			expectedRequest = abciTypes.RequestQuery{
+				Path:   "",
+				Data:   []byte("data-only"),
+				Height: 0,
+				Prove:  false,
+			}
+			expectedResponse = abciTypes.ResponseQuery{}
+
+			mockQuery = &mockQuery{
+				querySyncFn: func(req abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) {
+					assert.Equal(t, expectedRequest, req)
+
+					return expectedResponse, nil
+				},
+			}
+		)
+
+		h := NewHandler(mockQuery)
+
+		res, err := h.QueryHandler(nil, params)
+		require.Nil(t, err)
+
require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultABCIQuery) + require.True(t, ok) + + assert.Equal(t, expectedResponse, result.Response) + }) +} + +func TestHandler_InfoHandler(t *testing.T) { + t.Parallel() + + t.Run("Params not allowed", func(t *testing.T) { + t.Parallel() + + var ( + params = []any{"unexpected"} + + mockQuery = &mockQuery{ + infoSyncFn: func(_ abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + t.FailNow() + + return abciTypes.ResponseInfo{}, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Info error", func(t *testing.T) { + t.Parallel() + + var ( + infoErr = errors.New("info failed") + params = []any(nil) + + mockQuery = &mockQuery{ + infoSyncFn: func(req abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + // The request should always be empty + assert.Equal(t, abciTypes.RequestInfo{}, req) + + return abciTypes.ResponseInfo{}, infoErr + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, infoErr.Error()) + }) + + t.Run("Valid info", func(t *testing.T) { + t.Parallel() + + var ( + expectedResponse = abciTypes.ResponseInfo{ + ResponseBase: abciTypes.ResponseBase{ + Data: []byte("some-info"), + }, + ABCIVersion: "v1.2.3", + } + + mockQuery = &mockQuery{ + infoSyncFn: func(req abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + // The request should always be empty + assert.Equal(t, abciTypes.RequestInfo{}, req) + + return expectedResponse, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultABCIInfo) + require.True(t, ok) + + assert.Equal(t, expectedResponse, result.Response) + }) +} diff --git a/tm2/pkg/bft/rpc/core/abci/mock_test.go b/tm2/pkg/bft/rpc/core/abci/mock_test.go new file mode 100644 index 00000000000..5e2660d0116 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/abci/mock_test.go @@ -0,0 +1,51 @@ +package abci + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" +) + +type ( + errorDelegate func() error + echoSyncDelegate func(string) (abci.ResponseEcho, error) + infoSyncDelegate func(abci.RequestInfo) (abci.ResponseInfo, error) + querySyncDelegate func(abci.RequestQuery) (abci.ResponseQuery, error) +) + +type mockQuery struct { + errorFn errorDelegate + echoSyncFn echoSyncDelegate + infoSyncFn infoSyncDelegate + querySyncFn querySyncDelegate +} + +func (m *mockQuery) Error() error { + if m.errorFn != nil { + return m.errorFn() + } + + return nil +} + +func (m *mockQuery) EchoSync(msg string) (abci.ResponseEcho, error) { + if m.echoSyncFn != nil { + return m.echoSyncFn(msg) + } + + return abci.ResponseEcho{}, nil +} + +func (m *mockQuery) InfoSync(info abci.RequestInfo) (abci.ResponseInfo, error) { + if m.infoSyncFn != nil { + return m.infoSyncFn(info) + } + + return abci.ResponseInfo{}, nil +} + +func (m *mockQuery) QuerySync(query abci.RequestQuery) (abci.ResponseQuery, error) { + if m.querySyncFn != nil { + return m.querySyncFn(query) + } + + return abci.ResponseQuery{}, nil +} diff --git a/tm2/pkg/bft/rpc/core/blocks.go b/tm2/pkg/bft/rpc/core/blocks.go deleted file mode 100644 index 9ca4e05a46f..00000000000 --- a/tm2/pkg/bft/rpc/core/blocks.go +++ /dev/null @@ -1,436 +0,0 @@ 
-package core - -import ( - "fmt" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Get block headers for minHeight <= height <= maxHeight. -// Block headers are returned in descending order (highest first). -// -// ```shell -// curl 'localhost:26657/blockchain?minHeight=10&maxHeight=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.BlockchainInfo(10, 10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "block_metas": [ -// { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// ], -// "last_height": "5493" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// -func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - // maximum 20 block metas - const limit int64 = 20 - var err error - minHeight, maxHeight, err = filterMinMax(blockStore.Height(), minHeight, maxHeight, limit) - if err != nil { - return nil, err - } - logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) - - blockMetas := []*types.BlockMeta{} - for height := maxHeight; height >= minHeight; height-- { - blockMeta := blockStore.LoadBlockMeta(height) - blockMetas = append(blockMetas, blockMeta) - } - - return &ctypes.ResultBlockchainInfo{ - LastHeight: blockStore.Height(), - BlockMetas: blockMetas, - }, nil -} - -// error if either low or high are negative or low > high -// if low is 0 it defaults to 1, if high is 0 it defaults to height (block height). -// limit sets the maximum amounts of values included within [low,high] (inclusive), -// increasing low as necessary. -func filterMinMax(height, low, high, limit int64) (int64, int64, error) { - // filter negatives - if low < 0 || high < 0 { - return low, high, fmt.Errorf("heights must be non-negative") - } - - // adjust for default values - if low == 0 { - low = 1 - } - if high == 0 { - high = height - } - - // limit high to the height - high = min(height, high) - - // limit low to within `limit` of max - // so the total number of blocks returned will be `limit` - low = max(low, high-limit+1) - - if low > high { - return low, high, fmt.Errorf("min height %d can't be greater than max height %d", low, high) - } - return low, high, nil -} - -// Get block at a given height. -// If no height is provided, it will fetch the latest block. 
-// -// ```shell -// curl 'localhost:26657/block?height=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.Block(10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "block": { -// "last_commit": { -// "precommits": [ -// { -// "signature": { -// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB1C68577706A6A97C6EC34FFD12339183D5CA8BC2F46148773823DE905B7F6F5862FD564038BB7AE03BF50D", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "type": "2", -// "round": "0", -// "height": "9", -// "validator_index": "0", -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// } -// }, -// "data": { -// "txs": [] -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "block_meta": { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - blockMeta := blockStore.LoadBlockMeta(height) - block := blockStore.LoadBlock(height) - return &ctypes.ResultBlock{BlockMeta: blockMeta, Block: block}, nil -} - -// Get block commit at a given height. -// If no height is provided, it will fetch the commit for the latest block. 
-// -// ```shell -// curl 'localhost:26657/commit?height=11' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.Commit(11) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "canonical": true, -// "commit": { -// "precommits": [ -// { -// "signature": { -// "data": "00970429FEC652E9E21D106A90AE8C5413759A7488775CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA3983359A0C3A238D61DE55C75C9116D72ABC9CF50F", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": "1" -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// }, -// "type": "2", -// "round": "0", -// "height": "11", -// "validator_index": "0", -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": "1" -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// } -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "11", -// "time": "2017-05-29T15:05:54.893Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// }, -// "last_commit_hash": "3CE0C9727CE524BA9CB7C91E28F08E2B94001087", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - header := blockStore.LoadBlockMeta(height).Header - - // If the next block has not been committed yet, - // use a non-canonical commit - if height == storeHeight { - commit := blockStore.LoadSeenCommit(height) - return ctypes.NewResultCommit(&header, commit, false), nil - } - - // Return the canonical commit (comes from the block at height+1) - commit := blockStore.LoadBlockCommit(height) - return ctypes.NewResultCommit(&header, commit, true), nil -} - -// BlockResults gets ABCIResults at a given height. -// If no height is provided, it will fetch results for the latest block. -// -// Results are for the height of the block containing the txs. 
-// Thus response.results.deliver_tx[5] is the results of executing
-// getBlock(h).Txs[5]
-//
-// ```shell
-// curl 'localhost:26657/block_results?height=10'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// err := client.Start()
-//
-// if err != nil {
-//   // handle error
-// }
-//
-// defer client.Stop()
-// info, err := client.BlockResults(10)
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-//
-// {
-//   "jsonrpc": "2.0",
-//   "id": "",
-//   "result": {
-//     "height": "39",
-//     "results": {
-//       "deliver_tx": [
-//         {
-//           "tags": [
-//             {
-//               "key": "YXBwLmNyZWF0b3I=",
-//               "value": "Q29zbW9zaGkgTmV0b3dva28="
-//             }
-//           ]
-//         }
-//       ],
-//       "end_block": {
-//         "validator_updates": null
-//       },
-//       "begin_block": {}
-//     }
-//   }
-// }
-//
-// ```
-func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
-	storeHeight := blockStore.Height()
-	height, err := getHeightWithMin(storeHeight, heightPtr, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	results, err := sm.LoadABCIResponses(stateDB, height)
-	if err != nil {
-		return nil, err
-	}
-
-	res := &ctypes.ResultBlockResults{
-		Height:  height,
-		Results: results,
-	}
-	return res, nil
-}
-
-func getHeight(currentHeight int64, heightPtr *int64) (int64, error) {
-	return getHeightWithMin(currentHeight, heightPtr, 1)
-}
-
-func getHeightWithMin(currentHeight int64, heightPtr *int64, minVal int64) (int64, error) {
-	if heightPtr != nil {
-		height := *heightPtr
-		if height < minVal {
-			return 0, fmt.Errorf("height must be greater than or equal to %d", minVal)
-		}
-		if height > currentHeight {
-			return 0, fmt.Errorf("height must be less than or equal to the current blockchain height")
-		}
-		return height, nil
-	}
-	return currentHeight, nil
-}
diff --git a/tm2/pkg/bft/rpc/core/blocks/blocks.go b/tm2/pkg/bft/rpc/core/blocks/blocks.go
new file mode 100644
index 00000000000..30d5178276f
--- /dev/null
+++ b/tm2/pkg/bft/rpc/core/blocks/blocks.go
@@ -0,0 +1,230 @@
+package blocks
+
+import (
+	"fmt"
+
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params"
+	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata"
+	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
+	"github.com/gnolang/gno/tm2/pkg/bft/state"
+	"github.com/gnolang/gno/tm2/pkg/bft/types"
+	dbm "github.com/gnolang/gno/tm2/pkg/db"
+)
+
+// Handler is the blocks RPC handler
+type Handler struct {
+	store   state.BlockStore
+	stateDB dbm.DB
+}
+
+// NewHandler creates a new instance of the blocks RPC handler
+func NewHandler(store state.BlockStore, stateDB dbm.DB) *Handler {
+	return &Handler{
+		store:   store,
+		stateDB: stateDB,
+	}
+}
+
+// BlockchainInfoHandler fetches block headers for a given range.
+// Block headers are returned in descending order (highest first)
+//
+// Params:
+// - minHeight int64 (optional, default 1)
+// - maxHeight int64 (optional, default latest height)
+func (h *Handler) BlockchainInfoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	const limit int64 = 20
+
+	const (
+		idxMinHeight = 0
+		idxMaxHeight = 1
+	)
+
+	minHeight, err := params.AsInt64(p, idxMinHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	maxHeight, err := params.AsInt64(p, idxMaxHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	// Grab the latest height
+	storeHeight := h.store.Height()
+
+	minHeight, maxHeight, filterErr := filterMinMax(storeHeight, minHeight, maxHeight, limit)
+	if filterErr != nil {
+		return nil, spec.GenerateResponseError(filterErr)
+	}
+
+	blockMetas := make([]*types.BlockMeta, 0, maxHeight-minHeight+1)
+	for height := maxHeight; height >= minHeight; height-- {
+		blockMeta := h.store.LoadBlockMeta(height)
+
+		if blockMeta == nil {
+			// This would be a huge problemo
+			continue
+		}
+
+		blockMetas = append(blockMetas, blockMeta)
+	}
+
+	return &ctypes.ResultBlockchainInfo{
+		LastHeight: storeHeight,
+		BlockMetas: blockMetas,
+	}, nil
+}
+
+// BlockHandler fetches the block at the given height.
+// If no height is provided, it will fetch the latest block
+//
+// Params:
+// - height int64 (optional, default latest height)
+func (h *Handler) BlockHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	const idxHeight = 0
+
+	storeHeight := h.store.Height()
+
+	height, err := params.AsInt64(p, idxHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 1)
+	if normalizeErr != nil {
+		return nil, spec.GenerateResponseError(normalizeErr)
+	}
+
+	blockMeta := h.store.LoadBlockMeta(height)
+	if blockMeta == nil {
+		return nil, spec.GenerateResponseError(
+			fmt.Errorf("block meta not found for height %d", height),
+		)
+	}
+
+	block := h.store.LoadBlock(height)
+	if block == nil {
+		return nil, spec.GenerateResponseError(
+			fmt.Errorf("block not found for height %d", height),
+		)
+	}
+
+	return &ctypes.ResultBlock{
+		BlockMeta: blockMeta,
+		Block:     block,
+	}, nil
+}
+
+// CommitHandler fetches the block commit for the given height.
+// If no height is provided, it will fetch the commit for the latest block
+//
+// Params:
+// - height int64 (optional, default latest height)
+func (h *Handler) CommitHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	const idxHeight = 0
+
+	storeHeight := h.store.Height()
+
+	height, err := params.AsInt64(p, idxHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 1)
+	if normalizeErr != nil {
+		return nil, spec.GenerateResponseError(normalizeErr)
+	}
+
+	blockMeta := h.store.LoadBlockMeta(height)
+	if blockMeta == nil {
+		return nil, spec.GenerateResponseError(
+			fmt.Errorf("block meta not found for height %d", height),
+		)
+	}
+
+	header := blockMeta.Header
+
+	if height == storeHeight {
+		// latest, non-canonical commit
+		commit := h.store.LoadSeenCommit(height)
+		if commit == nil {
+			return nil, spec.GenerateResponseError(
+				fmt.Errorf("seen commit not found for height %d", height),
+			)
+		}
+
+		return ctypes.NewResultCommit(&header, commit, false), nil
+	}
+
+	// canonical commit (from height+1)
+	commit := h.store.LoadBlockCommit(height)
+	if commit == nil {
+		return nil, spec.GenerateResponseError(
+			fmt.Errorf("canonical commit not found for height %d", height),
+		)
+	}
+
+	return ctypes.NewResultCommit(&header, commit, true), nil
+}
+
+// BlockResultsHandler fetches the ABCIResults for the given height.
+// If no height is provided, it will fetch results for the latest block
+//
+// Params:
+// - height int64 (optional, default latest height)
+func (h *Handler) BlockResultsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
+	storeHeight := h.store.Height()
+
+	height, err := params.AsInt64(p, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 0)
+	if normalizeErr != nil {
+		return nil, spec.GenerateResponseError(normalizeErr)
+	}
+
+	results, loadErr := state.LoadABCIResponses(h.stateDB, height)
+	if loadErr != nil {
+		return nil, spec.GenerateResponseError(loadErr)
+	}
+
+	return &ctypes.ResultBlockResults{
+		Height:  height,
+		Results: results,
+	}, nil
+}
+
+// error if either low or high are negative or low > high
+// if low is 0 it defaults to 1, if high is 0 it defaults to height (block height).
+// limit sets the maximum amounts of values included within [low,high] (inclusive),
+// increasing low as necessary.
+func filterMinMax(height, low, high, limit int64) (int64, int64, error) { + // filter negatives + if low < 0 || high < 0 { + return low, high, fmt.Errorf("heights must be non-negative") + } + + // adjust for default values + if low == 0 { + low = 1 + } + if high == 0 { + high = height + } + + // limit high to the height + high = min(height, high) + + // limit low to within `limit` of max + // so the total number of blocks returned will be `limit` + low = max(low, high-limit+1) + + if low > high { + return low, high, fmt.Errorf("min height %d can't be greater than max height %d", low, high) + } + return low, high, nil +} diff --git a/tm2/pkg/bft/rpc/core/blocks/blocks_test.go b/tm2/pkg/bft/rpc/core/blocks/blocks_test.go new file mode 100644 index 00000000000..b6f4058e688 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/blocks/blocks_test.go @@ -0,0 +1,814 @@ +package blocks + +import ( + "fmt" + "testing" + + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +func TestHandler_BlockchainInfoHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid min height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mockBlockStore{} + params = []any{"foo", int64(10)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Filter error negative heights", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + } + params = []any{int64(-1), int64(5)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid range default (no params)", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 5 + + var ( + metas = map[int64]*types.BlockMeta{} + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + return metas[h] + }, + } + ) + + // Update meta range + for h := int64(1); h <= storeHeight; h++ { + metas[h] = &types.BlockMeta{ + Header: types.Header{Height: h}, + } + } + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockchainInfo) + require.True(t, ok) + + assert.Equal(t, storeHeight, result.LastHeight) + require.Len(t, result.BlockMetas, int(storeHeight)) + + expectedHeight := storeHeight + for i := 0; i < int(storeHeight); i++ { + assert.Equal(t, expectedHeight, result.BlockMetas[i].Header.Height) + + expectedHeight-- + } + }) + + t.Run("Valid range limited to 20", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 30 + + var ( + metas = map[int64]*types.BlockMeta{} + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + return metas[h] + }, + } + ) + + // Update the meta range + for h := int64(1); h <= storeHeight; h++ { + metas[h] = &types.BlockMeta{ + Header: types.Header{Height: h}, + } + } + + h := 
NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockchainInfo) + require.True(t, ok) + + require.Len(t, result.BlockMetas, 20) + + expectedHeight := storeHeight + for i := 0; i < 20; i++ { + assert.Equal(t, expectedHeight, result.BlockMetas[i].Header.Height) + + expectedHeight-- + } + }) +} + +func TestHandler_BlockHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mockBlockStore{} + params = []any{"foo"} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Height above latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + } + params = []any{storeHeight + 1} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block meta missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(_ int64) *types.BlockMeta { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return &types.BlockMeta{ + Header: types.Header{ + Height: h, + }, + } + } + + return nil + }, + loadBlockFn: func(_ int64) *types.Block { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid block latest by default", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + block = &types.Block{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + loadBlockFn: func(h int64) *types.Block { + if h == storeHeight { + return block + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlock) + require.True(t, ok) + + assert.Equal(t, meta, 
result.BlockMeta) + assert.Equal(t, block, result.Block) + }) + + t.Run("Valid block at explicit height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + blockHeight int64 = 7 + ) + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: blockHeight, + }, + } + block = &types.Block{ + Header: types.Header{ + Height: blockHeight, + }, + } + + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == blockHeight { + return meta + } + + return nil + }, + loadBlockFn: func(h int64) *types.Block { + if h == blockHeight { + return block + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, []any{blockHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlock) + require.True(t, ok) + + assert.Same(t, meta, result.BlockMeta) + assert.Same(t, block, result.Block) + }) +} + +func TestHandler_CommitHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mockBlockStore{} + h = NewHandler(store, nil) + params = []any{"foo"} + ) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block meta missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(_ int64) *types.BlockMeta { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Seen commit missing at latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + loadBlockCommitFn: func(_ int64) *types.Commit { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Canonical commit missing for past height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 9 + ) + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == targetHeight { + return meta + } + + return nil + }, + loadBlockCommitFn: func(_ int64) *types.Commit { + return nil // explicit + }, + } + params = []any{targetHeight} + ) + + h := 
NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Non-canonical commit at latest height", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + commit = &types.Commit{} + + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + loadSeenCommitFn: func(h int64) *types.Commit { + if h == storeHeight { + return commit + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, []any{storeHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultCommit) + require.True(t, ok) + + assert.False(t, result.CanonicalCommit) + }) + + t.Run("Canonical commit at past height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 9 + ) + + store := &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + loadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == targetHeight { + return &types.BlockMeta{ + Header: types.Header{ + Height: h, + }, + } + } + + return nil + }, + loadBlockCommitFn: func(h int64) *types.Commit { + if h == targetHeight { + return &types.Commit{} + } + + return nil + }, + } + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, []any{targetHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultCommit) + require.True(t, ok) + + assert.True(t, result.CanonicalCommit) + }) +} + +func TestHandler_BlockResultsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mockBlockStore{} + stateDB = memdb.NewMemDB() + params = []any{"foo"} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height above latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + } + stateDB = memdb.NewMemDB() + params = []any{storeHeight + 1} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("ABCI response load error", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mockBlockStore{ + heightFn: func() int64 { return storeHeight }, + } + stateDB = memdb.NewMemDB() + params = []any{storeHeight} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid block results", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 7 + ) + + var ( + expectedResponses = &sm.ABCIResponses{} + + store = &mockBlockStore{ + heightFn: func() int64 { + return storeHeight + }, + } + stateDB = memdb.NewMemDB() + ) + + h := NewHandler(store, stateDB) + + require.NotPanics(t, func() { + sm.SaveABCIResponses(stateDB, targetHeight, expectedResponses) + }) + + 
res, err := h.BlockResultsHandler(nil, []any{targetHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockResults) + require.True(t, ok) + + assert.Equal(t, targetHeight, result.Height) + assert.NotNil(t, result.Results) + }) +} + +func TestFilterMinMax(t *testing.T) { + t.Parallel() + + t.Run("Negative heights", func(t *testing.T) { + t.Parallel() + + _, _, err := filterMinMax(10, -1, 5, 20) + require.Error(t, err) + + assert.Contains(t, err.Error(), "heights must be non-negative") + }) + + t.Run("Defaults within limit", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(10, 0, 0, 20) + require.NoError(t, err) + + assert.Equal(t, int64(1), low) + assert.Equal(t, int64(10), high) + }) + + t.Run("Clamp high to current height", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(10, 5, 100, 20) + require.NoError(t, err) + + assert.Equal(t, int64(5), low) + assert.Equal(t, int64(10), high) + }) + + t.Run("Limit window size", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(100, 1, 100, 20) + require.NoError(t, err) + + assert.Equal(t, int64(81), low) + assert.Equal(t, int64(100), high) + }) + + t.Run("Low greater than high", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(5, 10, 1, 20) + require.Error(t, err) + + assert.Greater(t, low, high) + assert.Contains(t, err.Error(), "min height") + }) +} + +func TestFilterMinMax_Legacy(t *testing.T) { + t.Parallel() + + cases := []struct { + minVal, maxVal int64 + height int64 + limit int64 + resultLength int64 + wantErr bool + }{ + // min > max + {0, 0, 0, 10, 0, true}, // min set to 1 + {0, 1, 0, 10, 0, true}, // max set to height (0) + {0, 0, 1, 10, 1, false}, // max set to height (1) + {2, 0, 1, 10, 0, true}, // max set to height (1) + {2, 1, 5, 10, 0, true}, + + // negative + {1, 10, 14, 10, 10, false}, // control + {-1, 10, 14, 10, 0, true}, + {1, -10, 14, 10, 0, true}, + {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, + + // check limit and height + {1, 1, 1, 10, 1, false}, + {1, 1, 5, 10, 1, false}, + {2, 2, 5, 10, 1, false}, + {1, 2, 5, 10, 2, false}, + {1, 5, 1, 10, 1, false}, + {1, 5, 10, 10, 5, false}, + {1, 15, 10, 10, 10, false}, + {1, 15, 15, 10, 10, false}, + {1, 15, 15, 20, 15, false}, + {1, 20, 15, 20, 15, false}, + {1, 20, 20, 20, 20, false}, + } + + for i, c := range cases { + caseString := fmt.Sprintf("test %d failed", i) + + minVal, maxVal, err := filterMinMax(c.height, c.minVal, c.maxVal, c.limit) + if c.wantErr { + require.Error(t, err, caseString) + } else { + require.NoError(t, err, caseString) + require.Equal(t, 1+maxVal-minVal, c.resultLength, caseString) + } + } +} diff --git a/tm2/pkg/bft/rpc/core/blocks/mock_test.go b/tm2/pkg/bft/rpc/core/blocks/mock_test.go new file mode 100644 index 00000000000..4689884eda8 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/blocks/mock_test.go @@ -0,0 +1,85 @@ +package blocks + +import "github.com/gnolang/gno/tm2/pkg/bft/types" + +type ( + heightDelegate func() int64 + loadBlockMetaDelegate func(int64) *types.BlockMeta + loadBlockDelegate func(int64) *types.Block + loadSeenCommitDelegate func(int64) *types.Commit + loadBlockCommitDelegate func(int64) *types.Commit + loadBlockByHashDelegate func([]byte) *types.Block + loadBlockPartDelegate func(int64, int) *types.Part + saveBlockDelegate func(*types.Block, *types.PartSet, *types.Commit) +) + +type mockBlockStore struct { + heightFn heightDelegate + loadBlockMetaFn loadBlockMetaDelegate + loadBlockFn 
loadBlockDelegate + loadSeenCommitFn loadSeenCommitDelegate + loadBlockCommitFn loadBlockCommitDelegate + loadBlockByHashFn loadBlockByHashDelegate + loadBlockPartFn loadBlockPartDelegate + saveBlockFn saveBlockDelegate +} + +func (m *mockBlockStore) Height() int64 { + if m.heightFn != nil { + return m.heightFn() + } + + return 0 +} + +func (m *mockBlockStore) LoadBlockMeta(h int64) *types.BlockMeta { + if m.loadBlockMetaFn != nil { + return m.loadBlockMetaFn(h) + } + + return nil +} + +func (m *mockBlockStore) LoadBlock(h int64) *types.Block { + if m.loadBlockFn != nil { + return m.loadBlockFn(h) + } + + return nil +} + +func (m *mockBlockStore) LoadSeenCommit(h int64) *types.Commit { + if m.loadSeenCommitFn != nil { + return m.loadSeenCommitFn(h) + } + + return nil +} + +func (m *mockBlockStore) LoadBlockCommit(h int64) *types.Commit { + if m.loadBlockCommitFn != nil { + return m.loadBlockCommitFn(h) + } + + return nil +} + +func (m *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { + if m.loadBlockByHashFn != nil { + return m.loadBlockByHashFn(hash) + } + + return nil +} +func (m *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { + if m.loadBlockPartFn != nil { + return m.loadBlockPartFn(height, index) + } + + return nil +} +func (m *mockBlockStore) SaveBlock(block *types.Block, set *types.PartSet, commit *types.Commit) { + if m.saveBlockFn != nil { + m.saveBlockFn(block, set, commit) + } +} diff --git a/tm2/pkg/bft/rpc/core/blocks_test.go b/tm2/pkg/bft/rpc/core/blocks_test.go deleted file mode 100644 index dd55784ada0..00000000000 --- a/tm2/pkg/bft/rpc/core/blocks_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package core - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestBlockchainInfo(t *testing.T) { - t.Parallel() - - cases := []struct { - minVal, maxVal int64 - height int64 - limit int64 - resultLength int64 - wantErr bool - }{ - // min > max - {0, 0, 0, 10, 0, true}, // min set to 1 - {0, 1, 0, 10, 0, true}, // max set to height (0) - {0, 0, 1, 10, 1, false}, // max set to height (1) - {2, 0, 1, 10, 0, true}, // max set to height (1) - {2, 1, 5, 10, 0, true}, - - // negative - {1, 10, 14, 10, 10, false}, // control - {-1, 10, 14, 10, 0, true}, - {1, -10, 14, 10, 0, true}, - {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, - - // check limit and height - {1, 1, 1, 10, 1, false}, - {1, 1, 5, 10, 1, false}, - {2, 2, 5, 10, 1, false}, - {1, 2, 5, 10, 2, false}, - {1, 5, 1, 10, 1, false}, - {1, 5, 10, 10, 5, false}, - {1, 15, 10, 10, 10, false}, - {1, 15, 15, 10, 10, false}, - {1, 15, 15, 20, 15, false}, - {1, 20, 15, 20, 15, false}, - {1, 20, 20, 20, 20, false}, - } - - for i, c := range cases { - caseString := fmt.Sprintf("test %d failed", i) - minVal, maxVal, err := filterMinMax(c.height, c.minVal, c.maxVal, c.limit) - if c.wantErr { - require.Error(t, err, caseString) - } else { - require.NoError(t, err, caseString) - require.Equal(t, 1+maxVal-minVal, c.resultLength, caseString) - } - } -} - -func TestGetHeight(t *testing.T) { - t.Parallel() - - cases := []struct { - currentHeight int64 - heightPtr *int64 - minVal int64 - res int64 - wantErr bool - }{ - // height >= min - {42, int64Ptr(0), 0, 0, false}, - {42, int64Ptr(1), 0, 1, false}, - - // height < min - {42, int64Ptr(0), 1, 0, true}, - - // nil height - {42, nil, 1, 42, false}, - } - - for i, c := range cases { - caseString := fmt.Sprintf("test %d failed", i) - res, err := getHeightWithMin(c.currentHeight, c.heightPtr, c.minVal) - if c.wantErr { - 
require.Error(t, err, caseString) - } else { - require.NoError(t, err, caseString) - require.Equal(t, res, c.res, caseString) - } - } -} - -func int64Ptr(v int64) *int64 { - return &v -} diff --git a/tm2/pkg/bft/rpc/core/consensus.go b/tm2/pkg/bft/rpc/core/consensus.go deleted file mode 100644 index e87e6b526d3..00000000000 --- a/tm2/pkg/bft/rpc/core/consensus.go +++ /dev/null @@ -1,360 +0,0 @@ -package core - -import ( - cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Get the validator set at the given block height. -// If no height is provided, it will fetch the current validator set. -// Note the validators are sorted by their address - this is the canonical -// order for the validators in the set as used in computing their Merkle root. -// -// ```shell -// curl 'localhost:26657/validators' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.Validators() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "validators": [ -// { -// "proposer_priority": "0", -// "voting_power": "10", -// "pub_key": { -// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", -// "type": "ed25519" -// }, -// "address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "block_height": "5241" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Validators(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultValidators, error) { - // The latest validator that we know is the - // NextValidator of the last block. - height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) - if err != nil { - return nil, err - } - - validators, err := sm.LoadValidators(stateDB, height) - if err != nil { - return nil, err - } - return &ctypes.ResultValidators{ - BlockHeight: height, - Validators: validators.Validators, - }, nil -} - -// DumpConsensusState dumps consensus state. 
-// UNSTABLE -// -// ```shell -// curl 'localhost:26657/dump_consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.DumpConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height": "7185", -// "round": "0", -// "step": "1", -// "start_time": "2018-05-12T13:57:28.440293621-07:00", -// "commit_time": "2018-05-12T13:57:27.440293621-07:00", -// "validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// }, -// "proposal": null, -// "proposal_block": null, -// "proposal_block_parts": null, -// "locked_round": "0", -// "locked_block": null, -// "locked_block_parts": null, -// "valid_round": "0", -// "valid_block": null, -// "valid_block_parts": null, -// "votes": [ -// { -// "round": "0", -// "prevotes": "_", -// "precommits": "_" -// } -// ], -// "commit_round": "-1", -// "last_commit": { -// "votes": [ -// "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" -// ], -// "votes_bit_array": "x", -// "peer_maj_23s": {} -// }, -// "last_validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// } -// }, -// "peers": [ -// { -// "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", -// "peer_state": { -// "round_state": { -// "height": "7185", -// "round": "0", -// "step": "1", -// "start_time": "2018-05-12T13:57:27.438039872-07:00", -// "proposal": false, -// "proposal_block_parts_header": { -// "total": "0", -// "hash": "" -// }, -// "proposal_block_parts": null, -// "proposal_pol_round": "-1", -// "proposal_pol": "_", -// "prevotes": "_", -// "precommits": "_", -// "last_commit_round": "0", -// "last_commit": "x", -// "catchup_commit_round": "-1", -// "catchup_commit": "_" -// }, -// "stats": { -// "last_vote_height": "7184", -// "votes": "255", -// "last_block_part_height": "7184", -// "block_parts": "255" -// } -// } -// } -// ] -// } -// } -// -// ``` -func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { - // Get Peer consensus states. 
- peers := p2pPeers.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(interface { - GetExposed() cstypes.PeerStateExposed - }) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.GetExposed().ToJSON() - if err != nil { - return nil, err - } - peerStates[i] = ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: peer.SocketAddr().String(), - // Peer consensus state. - PeerState: peerStateJSON, - } - } - // Get self round state. - config := consensusState.GetConfigDeepCopy() - roundState := consensusState.GetRoundStateDeepCopy() - return &ctypes.ResultDumpConsensusState{ - Config: config, - RoundState: roundState, - Peers: peerStates, - }, nil -} - -// ConsensusState returns a concise summary of the consensus state. -// UNSTABLE -// -// ```shell -// curl 'localhost:26657/consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.ConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height/round/step": "9336/0/1", -// "start_time": "2018-05-14T10:25:45.72595357-04:00", -// "proposal_block_hash": "", -// "locked_block_hash": "", -// "valid_block_hash": "", -// "height_vote_set": [ -// { -// "round": "0", -// "prevotes": [ -// "nil-Vote" -// ], -// "prevotes_bit_array": "BA{1:_} 0/10 = 0.00", -// "precommits": [ -// "nil-Vote" -// ], -// "precommits_bit_array": "BA{1:_} 0/10 = 0.00" -// } -// ] -// } -// } -// } -// -// ``` -func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { - // Get self round state. - rs := consensusState.GetRoundStateSimple() - return &ctypes.ResultConsensusState{RoundState: rs}, nil -} - -// Get the consensus parameters at the given block height. -// If no height is provided, it will fetch the current consensus params. 
-// -// ```shell -// curl 'localhost:26657/consensus_params' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.ConsensusParams() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "block_height": "1", -// "consensus_params": { -// "block_size_params": { -// "max_txs_bytes": "22020096", -// "max_gas": "-1" -// }, -// "evidence_params": { -// "max_age": "100000" -// } -// } -// } -// } -// -// ``` -func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { - height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) - if err != nil { - return nil, err - } - - consensusparams, err := sm.LoadConsensusParams(stateDB, height) - if err != nil { - return nil, err - } - return &ctypes.ResultConsensusParams{ - BlockHeight: height, - ConsensusParams: consensusparams, - }, nil -} diff --git a/tm2/pkg/bft/rpc/core/consensus/consensus.go b/tm2/pkg/bft/rpc/core/consensus/consensus.go new file mode 100644 index 00000000000..2b46d1a3874 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/consensus.go @@ -0,0 +1,150 @@ +package consensus + +import ( + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" + dbm "github.com/gnolang/gno/tm2/pkg/db" +) + +// Handler is the consensus RPC handler +type Handler struct { + consensusState Consensus + stateDB dbm.DB + peers ctypes.Peers +} + +// NewHandler creates a new instance of the consensus RPC handler +func NewHandler(consensusState Consensus, stateDB dbm.DB, peers ctypes.Peers) *Handler { + return &Handler{ + consensusState: consensusState, + stateDB: stateDB, + peers: peers, + } +} + +// ValidatorsHandler returns the validator set at the given height. +// If no height is provided, it will fetch the current validator set. 
+// Note the validators are sorted by their address - this is the canonical +// order for the validators in the set as used in computing their Merkle root +// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) ValidatorsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + heightVal, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + latest := h.consensusState.GetState().LastBlockHeight + 1 + + height, normErr := utils.NormalizeHeight(latest, heightVal, 1) + if normErr != nil { + return nil, spec.GenerateResponseError(normErr) + } + + validators, loadErr := sm.LoadValidators(h.stateDB, height) + if loadErr != nil { + return nil, spec.GenerateResponseError(loadErr) + } + + return &ctypes.ResultValidators{ + BlockHeight: height, + Validators: validators.Validators, + }, nil +} + +// DumpConsensusStateHandler dumps the full consensus state (UNSTABLE) +// +// No params +func (h *Handler) DumpConsensusStateHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + var ( + peers = h.peers.Peers().List() + peerStates = make([]ctypes.PeerStateInfo, len(peers)) + ) + + for i, peer := range peers { + ps, ok := peer.Get(types.PeerStateKey).(interface { + GetExposed() cstypes.PeerStateExposed + }) + + if !ok { + continue + } + + psJSON, err := ps.GetExposed().ToJSON() + if err != nil { + return nil, spec.GenerateResponseError(err) + } + + peerStates[i] = ctypes.PeerStateInfo{ + NodeAddress: peer.SocketAddr().String(), + PeerState: psJSON, + } + } + + var ( + config = h.consensusState.GetConfigDeepCopy() + roundState = h.consensusState.GetRoundStateDeepCopy() + ) + + return &ctypes.ResultDumpConsensusState{ + Config: config, + RoundState: roundState, + Peers: peerStates, + }, nil +} + +// ConsensusStateHandler returns a concise summary of the consensus state (UNSTABLE) +// +// No params +func (h *Handler) ConsensusStateHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultConsensusState{ + RoundState: h.consensusState.GetRoundStateSimple(), + }, nil +} + +// ConsensusParamsHandler returns consensus params at a given height. 
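+// If no height is provided, it will fetch the current consensus params.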
+// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) ConsensusParamsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + heightVal, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + latest := h.consensusState.GetState().LastBlockHeight + 1 + + height, normErr := utils.NormalizeHeight(latest, heightVal, 1) + if normErr != nil { + return nil, spec.GenerateResponseError(normErr) + } + + consensusParams, loadErr := sm.LoadConsensusParams(h.stateDB, height) + if loadErr != nil { + return nil, spec.GenerateResponseError(loadErr) + } + + return &ctypes.ResultConsensusParams{ + BlockHeight: height, + ConsensusParams: consensusParams, + }, nil +} diff --git a/tm2/pkg/bft/rpc/core/consensus/consensus_test.go b/tm2/pkg/bft/rpc/core/consensus/consensus_test.go new file mode 100644 index 00000000000..9176661fedf --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/consensus_test.go @@ -0,0 +1,350 @@ +package consensus + +import ( + "testing" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +func TestHandler_ValidatorsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + params = []any{"not-an-int"} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Validators not found", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 0, + } + }, + } + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid default height", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + + valSet = &types.ValidatorSet{} + consensusParams = abci.ConsensusParams{} + + st = sm.State{ + LastBlockHeight: 0, + Validators: valSet, + NextValidators: valSet, + LastHeightValidatorsChanged: 1, + ConsensusParams: consensusParams, + LastHeightConsensusParamsChanged: 1, + } + + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { 
+ return st + }, + } + ) + + // Seed the state + sm.SaveState(db, st) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultValidators) + require.True(t, ok) + + assert.Equal(t, int64(1), result.BlockHeight) + assert.Equal(t, valSet.Validators, result.Validators) + }) +} + +func TestHandler_DumpConsensusStateHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := NewHandler(nil, nil, nil) + + res, err := h.DumpConsensusStateHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid dump", func(t *testing.T) { + t.Parallel() + + var ( + cfg = &cnscfg.ConsensusConfig{} + rs = &cstypes.RoundState{} + + mockConsensus = &mockConsensus{ + getConfigDeepCopyFn: func() *cnscfg.ConsensusConfig { + return cfg + }, + getRoundStateDeepCopyFn: func() *cstypes.RoundState { + return rs + }, + } + + mockPeers = &mock.Peers{ + PeersFn: func() p2p.PeerSet { + return &mock.PeerSet{} + }, + } + ) + + h := NewHandler(mockConsensus, nil, mockPeers) + + res, err := h.DumpConsensusStateHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultDumpConsensusState) + require.True(t, ok) + + assert.Same(t, cfg, result.Config) + assert.Same(t, rs, result.RoundState) + assert.Len(t, result.Peers, 0) + }) +} + +func TestHandler_ConsensusStateHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := NewHandler( + &mockConsensus{}, + memdb.NewMemDB(), + &mock.Peers{}, + ) + + res, err := h.ConsensusStateHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid simple round state", func(t *testing.T) { + t.Parallel() + + var ( + simple = cstypes.RoundStateSimple{ + HeightRoundStep: "10/0/0", + } + + mockConsensus = &mockConsensus{ + getRoundStateSimpleFn: func() cstypes.RoundStateSimple { + return simple + }, + } + ) + + h := NewHandler(mockConsensus, memdb.NewMemDB(), &mock.Peers{}) + + res, err := h.ConsensusStateHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultConsensusState) + require.True(t, ok) + + assert.Equal(t, simple, result.RoundState) + }) +} + +func TestHandler_ConsensusParamsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + db = memdb.NewMemDB() + params = []any{"not-an-int"} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + + db = memdb.NewMemDB() + params = []any{int64(-1)} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Consensus params not 
found", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 0, + } + }, + } + + db = memdb.NewMemDB() + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid latest height", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + consensusParams = abci.ConsensusParams{} + + st = sm.State{ + LastBlockHeight: 0, + Validators: &types.ValidatorSet{}, + NextValidators: &types.ValidatorSet{}, + LastHeightValidatorsChanged: 1, + ConsensusParams: consensusParams, + LastHeightConsensusParamsChanged: 1, + } + + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return st + }, + } + ) + + sm.SaveState(db, st) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultConsensusParams) + require.True(t, ok) + + assert.Equal(t, int64(1), result.BlockHeight) + assert.Equal(t, consensusParams, result.ConsensusParams) + }) +} diff --git a/tm2/pkg/bft/rpc/core/consensus/mock_test.go b/tm2/pkg/bft/rpc/core/consensus/mock_test.go new file mode 100644 index 00000000000..68310b1e35c --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/mock_test.go @@ -0,0 +1,74 @@ +package consensus + +import ( + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +type ( + getConfigDeepCopyDelegate func() *cnscfg.ConsensusConfig + getStateDelegate func() sm.State + getValidatorsDelegate func() (int64, []*types.Validator) + getLastHeightDelegate func() int64 + getRoundStateDeepCopyDelegate func() *cstypes.RoundState + getRoundStateSimpleDelegate func() cstypes.RoundStateSimple +) + +type mockConsensus struct { + getConfigDeepCopyFn getConfigDeepCopyDelegate + getStateFn getStateDelegate + getValidatorsFn getValidatorsDelegate + getLastHeightFn getLastHeightDelegate + getRoundStateDeepCopyFn getRoundStateDeepCopyDelegate + getRoundStateSimpleFn getRoundStateSimpleDelegate +} + +func (m *mockConsensus) GetConfigDeepCopy() *cnscfg.ConsensusConfig { + if m.getConfigDeepCopyFn != nil { + return m.getConfigDeepCopyFn() + } + + return nil +} + +func (m *mockConsensus) GetState() sm.State { + if m.getStateFn != nil { + return m.getStateFn() + } + + return sm.State{} +} + +func (m *mockConsensus) GetValidators() (int64, []*types.Validator) { + if m.getValidatorsFn != nil { + return m.getValidatorsFn() + } + + return 0, nil +} + +func (m *mockConsensus) GetLastHeight() int64 { + if m.getLastHeightFn != nil { + return m.getLastHeightFn() + } + + return 0 +} + +func (m *mockConsensus) GetRoundStateDeepCopy() *cstypes.RoundState { + if m.getRoundStateDeepCopyFn != nil { + return m.getRoundStateDeepCopyFn() + } + + return nil +} + +func (m *mockConsensus) GetRoundStateSimple() cstypes.RoundStateSimple { + if m.getRoundStateSimpleFn != nil { + return m.getRoundStateSimpleFn() + } + + return cstypes.RoundStateSimple{} +} diff --git a/tm2/pkg/bft/rpc/core/consensus/types.go b/tm2/pkg/bft/rpc/core/consensus/types.go new file mode 100644 index 00000000000..11827ead2e1 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/types.go @@ -0,0 +1,29 @@ +package 
consensus + +import ( + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +// Consensus exposes read-only access to consensus state for RPC handlers +type Consensus interface { + // GetConfigDeepCopy returns a deep copy of the current consensus config + GetConfigDeepCopy() *cnscfg.ConsensusConfig + + // GetState returns a snapshot of the current consensus state + GetState() sm.State + + // GetValidators returns the height and validator set for that height + GetValidators() (int64, []*types.Validator) + + // GetLastHeight returns the last block height known to consensus + GetLastHeight() int64 + + // GetRoundStateDeepCopy returns a deep copy of the full round state + GetRoundStateDeepCopy() *cstypes.RoundState + + // GetRoundStateSimple returns a concise summary of the round state + GetRoundStateSimple() cstypes.RoundStateSimple +} diff --git a/tm2/pkg/bft/rpc/core/dev.go b/tm2/pkg/bft/rpc/core/dev.go deleted file mode 100644 index a11c931b123..00000000000 --- a/tm2/pkg/bft/rpc/core/dev.go +++ /dev/null @@ -1,56 +0,0 @@ -package core - -import ( - "os" - "runtime/pprof" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// UnsafeFlushMempool removes all transactions from the mempool. -func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { - mempool.Flush() - return &ctypes.ResultUnsafeFlushMempool{}, nil -} - -var profFile *os.File - -// UnsafeStartCPUProfiler starts a pprof profiler using the given filename. -func UnsafeStartCPUProfiler(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - var err error - profFile, err = os.Create(filename) - if err != nil { - return nil, err - } - err = pprof.StartCPUProfile(profFile) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeStopCPUProfiler stops the running pprof profiler. -func UnsafeStopCPUProfiler(ctx *rpctypes.Context) (*ctypes.ResultUnsafeProfile, error) { - pprof.StopCPUProfile() - if err := profFile.Close(); err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeWriteHeapProfile dumps a heap profile to the given filename. -func UnsafeWriteHeapProfile(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - memProfFile, err := os.Create(filename) - if err != nil { - return nil, err - } - if err := pprof.WriteHeapProfile(memProfFile); err != nil { - return nil, err - } - if err := memProfFile.Close(); err != nil { - return nil, err - } - - return &ctypes.ResultUnsafeProfile{}, nil -} diff --git a/tm2/pkg/bft/rpc/core/doc.go b/tm2/pkg/bft/rpc/core/doc.go deleted file mode 100644 index 2cdbe51fbb1..00000000000 --- a/tm2/pkg/bft/rpc/core/doc.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -# Introduction - -Tendermint supports the following RPC protocols: - -* URI over HTTP -* JSONRPC over HTTP -* JSONRPC over websockets - -Tendermint RPC is built using our own RPC library which contains its own set of documentation and tests. -See it here: https://github.com/gnolang/gno/tm2/pkg/bft/tree/master/rpc/lib - -## Configuration - -RPC can be configured by tuning parameters under `[rpc]` table in the `$TMHOME/config/config.toml` file or by using the `--rpc.X` command-line flags. - -Default rpc listen address is `tcp://0.0.0.0:26657`. 
To set another address, set the `laddr` config parameter to desired value. -CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters. - -## Arguments - -Arguments which expect strings or byte arrays may be passed as quoted strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`. - -## URI/HTTP - -```bash -curl 'localhost:26657/broadcast_tx_sync?tx="abc"' -``` - -> Response: - -```json - - { - "error": "", - "result": { - "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", - "log": "", - "data": "", - "code": "0" - }, - "id": "", - "jsonrpc": "2.0" - } - -``` - -## JSONRPC/HTTP - -JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. `http://localhost:26657/`). - -```json - - { - "method": "broadcast_tx_sync", - "jsonrpc": "2.0", - "params": [ "abc" ], - "id": "dontcare" - } - -``` - -## JSONRPC/websockets - -JSONRPC requests can be made via websocket. The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. - -## More Examples - -See the various bash tests using curl in `test/`, and examples using the `Go` API in `rpc/client/`. - -## Get the list - -An HTTP Get request to the root RPC endpoint shows a list of available endpoints. - -```bash -curl 'localhost:26657' -``` - -> Response: - -```plain -Available endpoints: -/abci_info -/dump_consensus_state -/genesis -/net_info -/num_unconfirmed_txs -/status -/health -/unconfirmed_txs -/unsafe_flush_mempool -/unsafe_stop_cpu_profiler -/validators - -Endpoints that require arguments: -/abci_query?path=_&data=_&prove=_ -/block?height=_ -/blockchain?minHeight=_&maxHeight=_ -/broadcast_tx_async?tx=_ -/broadcast_tx_commit?tx=_ -/broadcast_tx_sync?tx=_ -/commit?height=_ -/dial_seeds?seeds=_ -/dial_persistent_peers?persistent_peers=_ -/tx?hash=_&prove=_ -/unsafe_start_cpu_profiler?filename=_ -/unsafe_write_heap_profile?filename=_ -``` - -# Endpoints -*/ -package core diff --git a/tm2/pkg/bft/rpc/core/doc_template.txt b/tm2/pkg/bft/rpc/core/doc_template.txt deleted file mode 100644 index 896d0c271f9..00000000000 --- a/tm2/pkg/bft/rpc/core/doc_template.txt +++ /dev/null @@ -1,8 +0,0 @@ -{{with .PDoc}} -{{comment_md .Doc}} -{{example_html $ ""}} - -{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}}) -{{comment_md .Doc}}{{end}} -{{end}} ---- diff --git a/tm2/pkg/bft/rpc/core/health.go b/tm2/pkg/bft/rpc/core/health.go deleted file mode 100644 index f036ba9b896..00000000000 --- a/tm2/pkg/bft/rpc/core/health.go +++ /dev/null @@ -1,41 +0,0 @@ -package core - -import ( - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Get node health. Returns empty result (200 OK) on success, no response - in -// case of an error. 
-// -// ```shell -// curl 'localhost:26657/health' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.Health() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { - return &ctypes.ResultHealth{}, nil -} diff --git a/tm2/pkg/bft/rpc/core/health/health.go b/tm2/pkg/bft/rpc/core/health/health.go new file mode 100644 index 00000000000..b26fcb549e0 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/health/health.go @@ -0,0 +1,19 @@ +package health + +import ( + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +// HealthHandler fetches the node health. +// Returns empty result (200 OK) on success, no response - in case of an error +// +// No params +func HealthHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultHealth{}, nil +} diff --git a/tm2/pkg/bft/rpc/core/health/health_test.go b/tm2/pkg/bft/rpc/core/health/health_test.go new file mode 100644 index 00000000000..628b79a3b12 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/health/health_test.go @@ -0,0 +1,37 @@ +package health + +import ( + "testing" + + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_Health(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + res, err := HealthHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid health status", func(t *testing.T) { + t.Parallel() + + res, err := HealthHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultHealth) + require.True(t, ok) + + assert.Equal(t, &ctypes.ResultHealth{}, result) + }) +} diff --git a/tm2/pkg/bft/rpc/core/mempool.go b/tm2/pkg/bft/rpc/core/mempool.go deleted file mode 100644 index ba3750574ce..00000000000 --- a/tm2/pkg/bft/rpc/core/mempool.go +++ /dev/null @@ -1,464 +0,0 @@ -package core - -import ( - "fmt" - "sync" - "time" - - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/errors" - "github.com/gnolang/gno/tm2/pkg/events" - "github.com/gnolang/gno/tm2/pkg/random" - "github.com/gnolang/gno/tm2/pkg/service" -) - -// ----------------------------------------------------------------------------- -// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) - -// Returns right away, with no response. Does not wait for CheckTx nor -// DeliverTx results. -// -// If you want to be sure that the transaction is included in a block, you can -// subscribe for the result using JSONRPC via a websocket. 
See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// If you haven't received anything after a couple of blocks, resend it. If the -// same happens again, send it to some other node. A few reasons why it could -// happen: -// -// 1. malicious node can drop or pretend it had committed your tx -// 2. malicious proposer (not necessary the one you're communicating with) can -// drop transactions, which might become valid in the future -// (https://github.com/gnolang/gno/tm2/pkg/bft/issues/3322) -// 3. node can be offline -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. -// -// ```shell -// curl 'localhost:26657/broadcast_tx_async?tx="123"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxAsync("123") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", -// "log": "", -// "data": "", -// "code": "0" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := mempool.CheckTx(tx, nil) - if err != nil { - return nil, err - } - return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil -} - -// Returns with the response from CheckTx. Does not wait for DeliverTx result. -// -// If you want to be sure that the transaction is included in a block, you can -// subscribe for the result using JSONRPC via a websocket. See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// If you haven't received anything after a couple of blocks, resend it. If the -// same happens again, send it to some other node. A few reasons why it could -// happen: -// -// 1. malicious node can drop or pretend it had committed your tx -// 2. malicious proposer (not necessary the one you're communicating with) can -// drop transactions, which might become valid in the future -// (https://github.com/gnolang/gno/tm2/pkg/bft/issues/3322) -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. 
-// -// ```shell -// curl 'localhost:26657/broadcast_tx_sync?tx="456"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxSync("456") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "code": "0", -// "data": "", -// "log": "", -// "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" -// }, -// "error": "" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - resCh := make(chan abci.Response, 1) - err := mempool.CheckTx(tx, func(res abci.Response) { - resCh <- res - }) - if err != nil { - return nil, err - } - res := <-resCh - r := res.(abci.ResponseCheckTx) - return &ctypes.ResultBroadcastTx{ - Error: r.Error, - Data: r.Data, - Log: r.Log, - Hash: tx.Hash(), - }, nil -} - -// Returns with the responses from CheckTx and DeliverTx. -// -// IMPORTANT: use only for testing and development. In production, use -// BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction -// result using JSONRPC via a websocket. See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// -// CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout -// waiting for tx to commit. -// -// If CheckTx or DeliverTx fail, no error will be returned, but the returned result -// will contain a non-OK ABCI code. -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. 
-// -// ```shell -// curl 'localhost:26657/broadcast_tx_commit?tx="789"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxCommit("789") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "height": "26682", -// "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", -// "deliver_tx": { -// "log": "", -// "data": "", -// "code": "0" -// }, -// "check_tx": { -// "log": "", -// "data": "", -// "code": "0" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan abci.Response, 1) - err := mempool.CheckTx(tx, func(res abci.Response) { - checkTxResCh <- res - }) - if err != nil { - logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %w", err) - } - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.(abci.ResponseCheckTx) - if checkTxRes.Error != nil { - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil - } - - // Wait for the tx to be included in a block or timeout. - txRes, err := gTxDispatcher.getTxResult(tx, nil) - if err != nil { - return nil, err - } - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxRes, - DeliverTx: txRes.Response, - Hash: tx.Hash(), - Height: txRes.Height, - }, nil -} - -// Get unconfirmed transactions (maximum ?limit entries) including their number. -// -// ```shell -// curl 'localhost:26657/unconfirmed_txs' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.UnconfirmedTxs() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "result" : { -// "txs" : [], -// "total_bytes" : "0", -// "n_txs" : "0", -// "total" : "0" -// }, -// "jsonrpc" : "2.0", -// "id" : "" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+--------------------------------------| -// | limit | int | 30 | false | Maximum number of entries (max: 100) | -// ``` -func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit = validatePerPage(limit) - - txs := mempool.ReapMaxTxs(limit) - return &ctypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes(), - Txs: txs, - }, nil -} - -// Get number of unconfirmed transactions. 
-// -// ```shell -// curl 'localhost:26657/num_unconfirmed_txs' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// if err != nil { -// // handle error -// } -// defer client.Stop() -// result, err := client.UnconfirmedTxs() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc" : "2.0", -// "id" : "", -// "result" : { -// "n_txs" : "0", -// "total_bytes" : "0", -// "total" : "0" -// "txs" : null, -// } -// } -// -// ``` -func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return &ctypes.ResultUnconfirmedTxs{ - Count: mempool.Size(), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes(), - }, nil -} - -// ---------------------------------------- -// txListener - -// NOTE: txDispatcher doesn't handle any throttling or resource management. -// The RPC websockets system is expected to throttle requests. -type txDispatcher struct { - service.BaseService - evsw events.EventSwitch - listenerID string - sub <-chan events.Event - - mtx sync.Mutex - waiters map[string]*txWaiter // string(types.Tx) -> *txWaiter -} - -func newTxDispatcher(evsw events.EventSwitch) *txDispatcher { - listenerID := fmt.Sprintf("txDispatcher#%v", random.RandStr(6)) - sub := events.SubscribeToEvent(evsw, listenerID, types.EventTx{}) - - td := &txDispatcher{ - evsw: evsw, - listenerID: listenerID, - sub: sub, - waiters: make(map[string]*txWaiter), - } - td.BaseService = *service.NewBaseService(nil, "txDispatcher", td) - err := td.Start() - if err != nil { - panic(err) - } - return td -} - -func (td *txDispatcher) OnStart() error { - go td.listenRoutine() - return nil -} - -func (td *txDispatcher) OnStop() { - td.evsw.RemoveListener(td.listenerID) -} - -func (td *txDispatcher) listenRoutine() { - for { - select { - case event, ok := <-td.sub: - if !ok { - td.Stop() - panic("txDispatcher subscription unexpectedly closed") - } - txEvent := event.(types.EventTx) - td.notifyTxEvent(txEvent) - case <-td.Quit(): - return - } - } -} - -func (td *txDispatcher) notifyTxEvent(txEvent types.EventTx) { - td.mtx.Lock() - defer td.mtx.Unlock() - - tx := txEvent.Result.Tx - waiter, ok := td.waiters[string(tx)] - if !ok { - return // nothing to do - } else { - waiter.txRes = txEvent.Result - close(waiter.waitCh) - } -} - -// blocking -// If the tx is already being waited on, returns the result from the original request. -// Upon result or timeout, the tx is forgotten from txDispatcher, and can be re-requested. -// If the tx times out, an error is returned. -// Quit can optionally be provided to terminate early (e.g. if the caller disconnects). -func (td *txDispatcher) getTxResult(tx types.Tx, quit chan struct{}) (types.TxResult, error) { - // Get or create waiter. 
- td.mtx.Lock() - waiter, ok := td.waiters[string(tx)] - if !ok { - waiter = newTxWaiter(tx) - td.waiters[string(tx)] = waiter - } - td.mtx.Unlock() - - select { - case <-waiter.waitCh: - return waiter.txRes, nil - case <-waiter.timeCh: - return types.TxResult{}, errors.New("request timeout") - case <-quit: - return types.TxResult{}, errors.New("caller quit") - } -} - -type txWaiter struct { - tx types.Tx - waitCh chan struct{} - timeCh <-chan time.Time - txRes types.TxResult -} - -func newTxWaiter(tx types.Tx) *txWaiter { - return &txWaiter{ - tx: tx, - waitCh: make(chan struct{}), - timeCh: time.After(config.TimeoutBroadcastTxCommit), - } -} diff --git a/tm2/pkg/bft/rpc/core/mempool/dispatcher.go b/tm2/pkg/bft/rpc/core/mempool/dispatcher.go new file mode 100644 index 00000000000..846fe28eb06 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/dispatcher.go @@ -0,0 +1,140 @@ +package mempool + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/events" + "github.com/gnolang/gno/tm2/pkg/random" + "github.com/gnolang/gno/tm2/pkg/service" +) + +// This code was moved over from the old Tendermint RPC implementation, and slightly cleaned up. +// If time allows, we should remove it altogether, and figure out a better mechanism for transaction waiting +type txDispatcher struct { + service.BaseService + + evsw events.EventSwitch + listenerID string + sub <-chan events.Event + + timeout time.Duration + + mtx sync.Mutex + waiters map[string]*txWaiter // string(tx) -> waiter shared by all callers +} + +func newTxDispatcher(evsw events.EventSwitch, timeout time.Duration) *txDispatcher { + listenerID := fmt.Sprintf("txDispatcher#%v", random.RandStr(6)) + sub := events.SubscribeToEvent(evsw, listenerID, types.EventTx{}) + + td := &txDispatcher{ + evsw: evsw, + listenerID: listenerID, + sub: sub, + timeout: timeout, + waiters: make(map[string]*txWaiter), + } + + td.BaseService = *service.NewBaseService(nil, "txDispatcher", td) + + if err := td.Start(); err != nil { + panic(err) + } + + return td +} + +func (td *txDispatcher) OnStart() error { + go td.listenRoutine() + return nil +} + +func (td *txDispatcher) OnStop() { + td.evsw.RemoveListener(td.listenerID) +} + +func (td *txDispatcher) listenRoutine() { + for { + select { + case event, ok := <-td.sub: + if !ok { + td.Stop() + panic("txDispatcher subscription unexpectedly closed") + } + + txEvent := event.(types.EventTx) + td.notifyTxEvent(txEvent) + + case <-td.Quit(): + return + } + } +} + +func (td *txDispatcher) notifyTxEvent(txEvent types.EventTx) { + key := string(txEvent.Result.Tx) + + td.mtx.Lock() + waiter, ok := td.waiters[key] + if !ok { + td.mtx.Unlock() + return + } + + delete(td.waiters, key) + + waiter.res = txEvent.Result + close(waiter.done) + + td.mtx.Unlock() +} + +// getTxResult blocks until: +// - the tx result arrives from events, OR +// - the dispatcher timeout expires, OR +// - the caller's quit channel fires (if non-nil). 
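+//
+// On timeout, the waiter is removed from the dispatcher, so the same tx can be awaited again later.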
+// +// All callers waiting on the same tx share the same waiter and get the same result +func (td *txDispatcher) getTxResult(tx types.Tx, quit chan struct{}) (types.TxResult, error) { + key := string(tx) + + td.mtx.Lock() + waiter, ok := td.waiters[key] + if !ok { + waiter = newTxWaiter() + td.waiters[key] = waiter + } + td.mtx.Unlock() + + timeout := time.After(td.timeout) + + select { + case <-waiter.done: + return waiter.res, nil + + case <-timeout: + td.mtx.Lock() + delete(td.waiters, key) + td.mtx.Unlock() + + return types.TxResult{}, errors.New("request timeout") + + case <-quit: + return types.TxResult{}, errors.New("caller quit") + } +} + +type txWaiter struct { + done chan struct{} + res types.TxResult +} + +func newTxWaiter() *txWaiter { + return &txWaiter{ + done: make(chan struct{}), + } +} diff --git a/tm2/pkg/bft/rpc/core/mempool/mempool.go b/tm2/pkg/bft/rpc/core/mempool/mempool.go new file mode 100644 index 00000000000..d0b3dbc973f --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mempool.go @@ -0,0 +1,177 @@ +package mempool + +import ( + "fmt" + "time" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + coreparams "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/events" +) + +// Handler is the mempool RPC handler +type Handler struct { + mempool Mempool + dispatcher *txDispatcher +} + +// NewHandler creates a new instance of the mempool RPC handler +func NewHandler( + mp Mempool, + evsw events.EventSwitch, + timeoutBroadcastTxCommit time.Duration, // TODO use config? +) *Handler { + return &Handler{ + mempool: mp, + dispatcher: newTxDispatcher(evsw, timeoutBroadcastTxCommit), + } +} + +// BroadcastTxAsyncHandler broadcasts the tx and returns right away, with no response. +// Does not wait for CheckTx nor DeliverTx results +// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxAsyncHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + if checkErr := h.mempool.CheckTx(tx, nil); checkErr != nil { + return nil, spec.GenerateResponseError(checkErr) + } + + return &ctypes.ResultBroadcastTx{ + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxSyncHandler broadcasts the tx and returns with the response from CheckTx. +// Does not wait for DeliverTx result +// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxSyncHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + resCh := make(chan abci.Response, 1) + if checkErr := h.mempool.CheckTx(tx, func(res abci.Response) { + resCh <- res + }); checkErr != nil { + return nil, spec.GenerateResponseError(checkErr) + } + + res := <-resCh + r := res.(abci.ResponseCheckTx) + + return &ctypes.ResultBroadcastTx{ + Error: r.Error, + Data: r.Data, + Log: r.Log, + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxCommitHandler broadcasts the tx and returns with the responses from CheckTx and DeliverTx. 
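+// Blocks until the transaction is included in a block, or until the broadcast-commit timeout expires.
+// As in the previous implementation, this call is intended for testing and development;
+// production clients should prefer BroadcastTxSyncHandler or BroadcastTxAsyncHandler.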
+// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxCommitHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + checkTxResCh := make(chan abci.Response, 1) + if checkErr := h.mempool.CheckTx(tx, func(res abci.Response) { + checkTxResCh <- res + }); checkErr != nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("error on BroadcastTxCommit: %w", checkErr), + ) + } + + checkTxResMsg := <-checkTxResCh + checkTxRes := checkTxResMsg.(abci.ResponseCheckTx) + + if checkTxRes.Error != nil { + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, nil + } + + txRes, txErr := h.dispatcher.getTxResult(tx, nil) + if txErr != nil { + return nil, spec.GenerateResponseError(txErr) + } + + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: checkTxRes, + DeliverTx: txRes.Response, + Hash: tx.Hash(), + Height: txRes.Height, + }, nil +} + +// UnconfirmedTxsHandler fetches unconfirmed transactions (maximum ?limit entries) including their number. +// +// Params: +// - limit int64 (optional, default 30, max 100) +func (h *Handler) UnconfirmedTxsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxLimit = 0 + + limit64, err := coreparams.AsInt64(p, idxLimit) + if err != nil { + return nil, err + } + + var ( + limit = utils.ValidatePerPage(int(limit64)) + txs = h.mempool.ReapMaxTxs(limit) + ) + + return &ctypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: h.mempool.Size(), + TotalBytes: h.mempool.TxsBytes(), + Txs: txs, + }, nil +} + +// NumUnconfirmedTxsHandler fetches the number of unconfirmed transactions. 
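+// Unlike UnconfirmedTxsHandler, only the counts and total byte size are returned;
+// the transactions themselves are not included in the result.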
+// +// No params +func (h *Handler) NumUnconfirmedTxsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultUnconfirmedTxs{ + Count: h.mempool.Size(), + Total: h.mempool.Size(), + TotalBytes: h.mempool.TxsBytes(), + }, nil +} diff --git a/tm2/pkg/bft/rpc/core/mempool/mempool_test.go b/tm2/pkg/bft/rpc/core/mempool/mempool_test.go new file mode 100644 index 00000000000..05ccb3d0080 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mempool_test.go @@ -0,0 +1,459 @@ +package mempool + +import ( + "errors" + "testing" + "time" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_BroadcastTxAsyncHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxAsyncHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("mempool error") + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + + h = &Handler{ + mempool: mp, + } + + txBytes = []byte("tx-bytes") + params = []any{txBytes} + ) + + res, err := h.BroadcastTxAsyncHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("Valid broadcast", func(t *testing.T) { + t.Parallel() + + var ( + capturedTx types.Tx + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + capturedTx = tx + return nil + }, + } + + h = &Handler{ + mempool: mp, + } + + txBytes = []byte("some-tx") + params = []any{txBytes} + ) + + res, err := h.BroadcastTxAsyncHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + expectedHash := types.Tx(txBytes).Hash() + assert.Equal(t, expectedHash, result.Hash) + assert.Equal(t, types.Tx(txBytes), capturedTx) + }) +} + +func TestHandler_BroadcastTxSyncHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxSyncHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("sync mempool error") + + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{[]byte("tx")} + ) + + res, err := h.BroadcastTxSyncHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("Valid CheckTx response", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("sync-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + 
Data: []byte("data"), + Log: "log-message", + Error: nil, + }, + } + + mp = &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + + cb(checkResp) + return nil + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{txBytes} + ) + + res, err := h.BroadcastTxSyncHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + assert.Equal(t, checkResp.Error, result.Error) + assert.Equal(t, checkResp.Data, result.Data) + assert.Equal(t, checkResp.Log, result.Log) + assert.Equal(t, tx.Hash(), result.Hash) + }) +} + +func TestHandler_BroadcastTxCommitHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxCommitHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx call error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("commit mempool error") + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + h = &Handler{ + mempool: mp, + } + + params = []any{[]byte("tx")} + ) + + res, err := h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, "error on BroadcastTxCommit") + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("CheckTx response error", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("commit-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + Error: testABCIError{msg: "check failed"}, + Data: []byte("ignored"), + Log: "ignored", + }, + } + + mp = &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + cb(checkResp) + return nil + }, + } + + h = &Handler{ + mempool: mp, + dispatcher: nil, // explicit + } + + params = []any{txBytes} + ) + + res, err := h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + + assert.Equal(t, checkResp, result.CheckTx) + assert.Equal(t, abci.ResponseDeliverTx{}, result.DeliverTx) + assert.Equal(t, tx.Hash(), result.Hash) + assert.Equal(t, int64(0), result.Height) + }) + + t.Run("Successful commit", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("commit-success-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + Error: nil, + Data: []byte("check-data"), + Log: "check-log", + }, + } + + expectedDeliver = abci.ResponseDeliverTx{ + ResponseBase: abci.ResponseBase{ + Data: []byte("deliver-data"), + Log: "deliver-log", + }, + } + expectedHeight = int64(42) + params = []any{txBytes} + ) + + waiter := newTxWaiter() + waiter.res = types.TxResult{ + Height: expectedHeight, + Response: expectedDeliver, + } + close(waiter.done) + + dispatcher := &txDispatcher{ + timeout: time.Minute, + waiters: map[string]*txWaiter{ + string(tx): waiter, + }, + } + + mp := &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + cb(checkResp) + + return nil + }, + } + + h := &Handler{ + mempool: mp, + dispatcher: dispatcher, + } + + res, err := 
h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + + assert.Equal(t, checkResp, result.CheckTx) + assert.Equal(t, expectedDeliver, result.DeliverTx) + assert.Equal(t, expectedHeight, result.Height) + assert.Equal(t, tx.Hash(), result.Hash) + }) +} +func TestHandler_UnconfirmedTxsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid limit param", func(t *testing.T) { + t.Parallel() + + var ( + h = &Handler{ + mempool: &mockMempool{}, + } + + params = []any{"not-an-int"} + ) + + res, err := h.UnconfirmedTxsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid limit and mempool data", func(t *testing.T) { + t.Parallel() + + var ( + expectedTxs = []types.Tx{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mp = &mockMempool{ + reapMaxTxsFn: func(max int) []types.Tx { + assert.Equal(t, 10, max) + return expectedTxs + }, + sizeFn: func() int { + return 5 + }, + txsBytesFn: func() int64 { + return 123 + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{int64(10)} + ) + + res, err := h.UnconfirmedTxsHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + assert.Equal(t, len(expectedTxs), result.Count) + assert.Equal(t, 5, result.Total) + assert.Equal(t, int64(123), result.TotalBytes) + assert.Equal(t, expectedTxs, result.Txs) + }) +} + +func TestHandler_NumUnconfirmedTxsHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.NumUnconfirmedTxsHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid call", func(t *testing.T) { + t.Parallel() + + var ( + size = 7 + txsBytes = int64(456) + + mp = &mockMempool{ + sizeFn: func() int { + return size + }, + txsBytesFn: func() int64 { + return txsBytes + }, + } + + h = &Handler{ + mempool: mp, + } + ) + + res, err := h.NumUnconfirmedTxsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + assert.Equal(t, size, result.Count) + assert.Equal(t, size, result.Total) + assert.Equal(t, txsBytes, result.TotalBytes) + }) +} + +type testABCIError struct { + msg string +} + +func (e testABCIError) Error() string { + return e.msg +} + +func (e testABCIError) AssertABCIError() {} diff --git a/tm2/pkg/bft/rpc/core/mempool/mock_test.go b/tm2/pkg/bft/rpc/core/mempool/mock_test.go new file mode 100644 index 00000000000..fe755c6f652 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mock_test.go @@ -0,0 +1,52 @@ +package mempool + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +type ( + checkTxDelegate func(tx types.Tx, cb func(abci.Response)) error + reapMaxTxsDelegate func(max int) []types.Tx + sizeDelegate func() int + txsBytesDelegate func() int64 +) + +type mockMempool struct { + checkTxFn checkTxDelegate + reapMaxTxsFn reapMaxTxsDelegate + sizeFn sizeDelegate + txsBytesFn txsBytesDelegate +} + +func (m *mockMempool) CheckTx(tx types.Tx, cb func(abci.Response)) error { + if m.checkTxFn != nil { + return m.checkTxFn(tx, cb) + } + + return nil +} + +func (m *mockMempool) 
ReapMaxTxs(max int) []types.Tx { + if m.reapMaxTxsFn != nil { + return m.reapMaxTxsFn(max) + } + + return nil +} + +func (m *mockMempool) Size() int { + if m.sizeFn != nil { + return m.sizeFn() + } + + return 0 +} + +func (m *mockMempool) TxsBytes() int64 { + if m.txsBytesFn != nil { + return m.txsBytesFn() + } + + return 0 +} diff --git a/tm2/pkg/bft/rpc/core/mempool/types.go b/tm2/pkg/bft/rpc/core/mempool/types.go new file mode 100644 index 00000000000..47b5525e670 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/types.go @@ -0,0 +1,22 @@ +package mempool + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +// Mempool is the minimal mempool interface the RPC handler needs +type Mempool interface { + // CheckTx submits a transaction to the mempool. + // If cb is non-nil, it is called with the CheckTx ABCI response + CheckTx(tx types.Tx, cb func(abci.Response)) error + + // ReapMaxTxs returns up to max pending transactions from the mempool + ReapMaxTxs(max int) []types.Tx + + // Size returns the number of transactions currently in the mempool + Size() int + + // TxsBytes returns the total size (in bytes) of all transactions in the mempool + TxsBytes() int64 +} diff --git a/tm2/pkg/bft/rpc/core/mock/p2p.go b/tm2/pkg/bft/rpc/core/mock/p2p.go new file mode 100644 index 00000000000..640535f5339 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mock/p2p.go @@ -0,0 +1,136 @@ +package mock + +import ( + "net" + + "github.com/gnolang/gno/tm2/pkg/p2p" + p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" +) + +type ( + PeersDelegate func() p2p.PeerSet +) + +type Peers struct { + PeersFn PeersDelegate +} + +func (m *Peers) Peers() p2p.PeerSet { + if m.PeersFn != nil { + return m.PeersFn() + } + + return nil +} + +type ( + AddDelegate func(p2p.PeerConn) + RemoveDelegate func(p2pTypes.ID) bool + HasDelegate func(p2pTypes.ID) bool + HasIPDelegate func(net.IP) bool + GetPeerDelegate func(p2pTypes.ID) p2p.PeerConn + ListDelegate func() []p2p.PeerConn + NumInboundDelegate func() uint64 + NumOutboundDelegate func() uint64 +) + +type PeerSet struct { + AddFn AddDelegate + RemoveFn RemoveDelegate + HasFn HasDelegate + HasIPFn HasIPDelegate + GetFn GetPeerDelegate + ListFn ListDelegate + NumInboundFn NumInboundDelegate + NumOutboundFn NumOutboundDelegate +} + +func (m *PeerSet) Add(peer p2p.PeerConn) { + if m.AddFn != nil { + m.AddFn(peer) + } +} + +func (m *PeerSet) Remove(key p2pTypes.ID) bool { + if m.RemoveFn != nil { + m.RemoveFn(key) + } + + return false +} + +func (m *PeerSet) Has(key p2pTypes.ID) bool { + if m.HasFn != nil { + return m.HasFn(key) + } + + return false +} + +func (m *PeerSet) Get(key p2pTypes.ID) p2p.PeerConn { + if m.GetFn != nil { + return m.GetFn(key) + } + + return nil +} + +func (m *PeerSet) List() []p2p.PeerConn { + if m.ListFn != nil { + return m.ListFn() + } + + return nil +} + +func (m *PeerSet) NumInbound() uint64 { + if m.NumInboundFn != nil { + return m.NumInboundFn() + } + + return 0 +} + +func (m *PeerSet) NumOutbound() uint64 { + if m.NumOutboundFn != nil { + return m.NumOutboundFn() + } + + return 0 +} + +type ( + ListenersDelegate func() []string + IsListeningDelegate func() bool + NodeInfoDelegate func() p2pTypes.NodeInfo +) + +type Transport struct { + ListenersFn ListenersDelegate + IsListeningFn IsListeningDelegate + NodeInfoFn NodeInfoDelegate +} + +func (m *Transport) Listeners() []string { + if m.ListenersFn != nil { + return m.ListenersFn() + } + + return nil +} + +func (m *Transport) IsListening() bool { + 
if m.IsListeningFn != nil { + return m.IsListeningFn() + } + + return false +} + +func (m *Transport) NodeInfo() p2pTypes.NodeInfo { + if m.NodeInfoFn != nil { + return m.NodeInfoFn() + } + + return p2pTypes.NodeInfo{} +} diff --git a/tm2/pkg/bft/rpc/core/net.go b/tm2/pkg/bft/rpc/core/net.go deleted file mode 100644 index f8839b7d91f..00000000000 --- a/tm2/pkg/bft/rpc/core/net.go +++ /dev/null @@ -1,229 +0,0 @@ -package core - -import ( - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Get network info. -// -// ```shell -// curl 'localhost:26657/net_info' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.NetInfo() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "listening": true, -// "listeners": [ -// "Listener(@)" -// ], -// "n_peers": "3", -// "peers": [ -// { -// "node_info": { -// "protocol_version": { -// "p2p": "7", -// "block": "8", -// "app": "1" -// }, -// "id": "93529da3435c090d02251a050342b6a488d4ab56", -// "listen_addr": "tcp://0.0.0.0:26656", -// "network": "chain-RFo6qC", -// "version": "0.30.0", -// "channels": "4020212223303800", -// "moniker": "fc89e4ed23f2", -// "other": { -// "tx_index": "on", -// "rpc_address": "tcp://0.0.0.0:26657" -// } -// }, -// "is_outbound": true, -// "connection_status": { -// "Duration": "3475230558", -// "SendMonitor": { -// "Active": true, -// "Start": "2019-02-14T12:40:47.52Z", -// "Duration": "3480000000", -// "Idle": "240000000", -// "Bytes": "4512", -// "Samples": "9", -// "InstRate": "1338", -// "CurRate": "2046", -// "AvgRate": "1297", -// "PeakRate": "6570", -// "BytesRem": "0", -// "TimeRem": "0", -// "Progress": 0 -// }, -// "RecvMonitor": { -// "Active": true, -// "Start": "2019-02-14T12:40:47.52Z", -// "Duration": "3480000000", -// "Idle": "280000000", -// "Bytes": "4489", -// "Samples": "10", -// "InstRate": "1821", -// "CurRate": "1663", -// "AvgRate": "1290", -// "PeakRate": "5512", -// "BytesRem": "0", -// "TimeRem": "0", -// "Progress": 0 -// }, -// "Channels": [ -// { -// "ID": 48, -// "SendQueueCapacity": "1", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "0" -// }, -// { -// "ID": 64, -// "SendQueueCapacity": "1000", -// "SendQueueSize": "0", -// "Priority": "10", -// "RecentlySent": "14" -// }, -// { -// "ID": 32, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "619" -// }, -// { -// "ID": 33, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "10", -// "RecentlySent": "1363" -// }, -// { -// "ID": 34, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "2145" -// }, -// { -// "ID": 35, -// "SendQueueCapacity": "2", -// "SendQueueSize": "0", -// "Priority": "1", -// "RecentlySent": "0" -// }, -// { -// "ID": 56, -// "SendQueueCapacity": "1", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "0" -// }, -// { -// "ID": 0, -// "SendQueueCapacity": "10", -// "SendQueueSize": "0", -// "Priority": "1", -// "RecentlySent": "10" -// } -// ] -// }, -// "remote_ip": "192.167.10.3" -// }, -// ... 
-// } -// -// ``` -func NetInfo(_ *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - var ( - set = p2pPeers.Peers() - out, in = set.NumOutbound(), set.NumInbound() - ) - - peers := make([]ctypes.Peer, 0, out+in) - for _, peer := range set.List() { - nodeInfo := peer.NodeInfo() - peers = append(peers, ctypes.Peer{ - NodeInfo: nodeInfo, - IsOutbound: peer.IsOutbound(), - ConnectionStatus: peer.Status(), - RemoteIP: peer.RemoteIP().String(), - }) - } - - return &ctypes.ResultNetInfo{ - Listening: p2pTransport.IsListening(), - Listeners: p2pTransport.Listeners(), - NPeers: len(peers), - Peers: peers, - }, nil -} - -// Get genesis file. -// -// ```shell -// curl 'localhost:26657/genesis' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// genesis, err := client.Genesis() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "genesis": { -// "app_hash": "", -// "validators": [ -// { -// "name": "", -// "power": "10", -// "pub_key": { -// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", -// "type": "ed25519" -// } -// } -// ], -// "chain_id": "test-chain-6UTNIN", -// "genesis_time": "2017-05-29T15:05:41.671Z" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return &ctypes.ResultGenesis{Genesis: genDoc}, nil -} diff --git a/tm2/pkg/bft/rpc/core/net/net.go b/tm2/pkg/bft/rpc/core/net/net.go new file mode 100644 index 00000000000..9afbdb3ea8e --- /dev/null +++ b/tm2/pkg/bft/rpc/core/net/net.go @@ -0,0 +1,71 @@ +package net + +import ( + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +// Handler is the net RPC handler +type Handler struct { + genesisDoc *types.GenesisDoc + + peers ctypes.Peers + transport ctypes.Transport +} + +// NewHandler creates a new instance of the net RPC handler +func NewHandler( + peers ctypes.Peers, + transport ctypes.Transport, + genesisDoc *types.GenesisDoc, +) *Handler { + return &Handler{ + peers: peers, + transport: transport, + genesisDoc: genesisDoc, + } +} + +// NetInfo fetches the current network info +// +// No params +func (h *Handler) NetInfo(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + var ( + set = h.peers.Peers() + out, in = set.NumOutbound(), set.NumInbound() + ) + + peers := make([]ctypes.Peer, 0, out+in) + for _, peer := range set.List() { + peers = append(peers, ctypes.Peer{ + NodeInfo: peer.NodeInfo(), + IsOutbound: peer.IsOutbound(), + ConnectionStatus: peer.Status(), + RemoteIP: peer.RemoteIP().String(), + }) + } + + return &ctypes.ResultNetInfo{ + Listening: h.transport.IsListening(), + Listeners: h.transport.Listeners(), + NPeers: len(peers), + Peers: peers, + }, nil +} + +// GenesisHandler fetches the genesis document (genesis.json) +// +// No params +func (h *Handler) GenesisHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultGenesis{Genesis: h.genesisDoc}, nil +} diff --git a/tm2/pkg/bft/rpc/core/net/net_test.go 
b/tm2/pkg/bft/rpc/core/net/net_test.go new file mode 100644 index 00000000000..744b9475743 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/net/net_test.go @@ -0,0 +1,108 @@ +package net + +import ( + "testing" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_NetInfo(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{} + + res, err := h.NetInfo(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid, empty peer set", func(t *testing.T) { + t.Parallel() + + var ( + mockPeerSet = &mock.PeerSet{} + + mockPeers = &mock.Peers{ + PeersFn: func() p2p.PeerSet { + return mockPeerSet + }, + } + + expectedListeners = []string{"tcp://0.0.0.0:26656"} + + mockTransport = &mock.Transport{ + ListenersFn: func() []string { + return expectedListeners + }, + IsListeningFn: func() bool { + return true + }, + } + ) + + h := &Handler{ + peers: mockPeers, + transport: mockTransport, + } + + res, err := h.NetInfo(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultNetInfo) + require.True(t, ok) + + assert.True(t, result.Listening) + assert.Equal(t, expectedListeners, result.Listeners) + assert.Equal(t, 0, result.NPeers) + assert.Len(t, result.Peers, 0) + }) +} + +func TestHandler_GenesisHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{} + + res, err := h.GenesisHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Returns genesis doc", func(t *testing.T) { + t.Parallel() + + genDoc := &types.GenesisDoc{ + ChainID: "test-chain", + } + + h := &Handler{ + genesisDoc: genDoc, + } + + res, err := h.GenesisHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultGenesis) + require.True(t, ok) + + assert.Equal(t, genDoc, result.Genesis) + }) +} diff --git a/tm2/pkg/bft/rpc/core/params/params.go b/tm2/pkg/bft/rpc/core/params/params.go new file mode 100644 index 00000000000..14507d57fac --- /dev/null +++ b/tm2/pkg/bft/rpc/core/params/params.go @@ -0,0 +1,180 @@ +package params + +import ( + "encoding/hex" + "encoding/json" + "strconv" + "strings" + + "github.com/gnolang/gno/tm2/pkg/amino" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +func get(params []any, idx int) any { + if idx < 0 || idx >= len(params) { + return nil + } + + return params[idx] +} + +func AsString(params []any, idx int) (string, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return "", nil + } + + switch v := raw.(type) { + case string: + // Query params are strings already + return v, nil + default: + // For JSON-RPC POSTs, go through Amino to preserve legacy behavior + b, err := json.Marshal(v) + if err != nil { + return "", spec.GenerateInvalidParamError(idx) + } + + var out string + if err = amino.UnmarshalJSON(b, &out); err != nil { + return "", spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsBytes(params []any, idx int, required bool) ([]byte, *spec.BaseJSONError) { + raw 
:= get(params, idx) + if raw == nil { + if required { + return nil, spec.GenerateInvalidParamError(idx) + } + + return nil, nil + } + + switch v := raw.(type) { + case string: + // HTTP GET compatibility, 0x-prefixed hex + if strings.HasPrefix(v, "0x") { + data, err := hex.DecodeString(v[2:]) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return data, nil + } + + // For everything else, Amino semantics for []byte + b, err := amino.MarshalJSON(v) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + var out []byte + if err := amino.UnmarshalJSON(b, &out); err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return out, nil + + default: + // For JSON-RPC POSTs, the value is already decoded by encoding/json + b, err := json.Marshal(v) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + var out []byte + if err := amino.UnmarshalJSON(b, &out); err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsInt64(params []any, idx int) (int64, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return 0, nil + } + + switch v := raw.(type) { + case int64: + return v, nil + + case int: + return int64(v), nil + + case float64: + // JSON numbers -> int64 (old Amino expected strings, but no client should rely on that distinction) + return int64(v), nil + + case string: + // HTTP GET: query param is always a string. + // Old Amino wrapped integer-looking strings in quotes and then used Amino decoding + if v == "" { + return 0, nil + } + + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + return i, nil + + default: + // Fallback, json -> amino -> int64 + b, err := json.Marshal(v) + if err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + var out int64 + if err := amino.UnmarshalJSON(b, &out); err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsBool(params []any, idx int) (bool, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return false, nil + } + + switch v := raw.(type) { + case bool: + return v, nil + + case string: + // Accept "true"/"false" as HTTP query values + switch strings.ToLower(v) { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, spec.GenerateInvalidParamError(idx) + } + + default: + // Fallback, json -> amino -> bool + b, err := json.Marshal(v) + if err != nil { + return false, spec.GenerateInvalidParamError(idx) + } + + var out bool + if err := amino.UnmarshalJSON(b, &out); err != nil { + return false, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} diff --git a/tm2/pkg/bft/rpc/core/types/peers.go b/tm2/pkg/bft/rpc/core/types/peers.go new file mode 100644 index 00000000000..71aaf275105 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/types/peers.go @@ -0,0 +1,24 @@ +package core_types + +import ( + "github.com/gnolang/gno/tm2/pkg/p2p" + p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" +) + +// Peers exposes access to the current P2P peer set +type Peers interface { + // Peers returns the current peer set + Peers() p2p.PeerSet +} + +// Transport exposes read-only access to the P2P transport +type Transport interface { + // Listeners returns the addresses the node is currently listening on + Listeners() []string + + // IsListening reports whether the node is currently accepting incoming connections + IsListening() bool + + // NodeInfo 
returns the local node's P2P identity and metadata + NodeInfo() p2pTypes.NodeInfo +} diff --git a/tm2/pkg/bft/rpc/core/utils/utils.go b/tm2/pkg/bft/rpc/core/utils/utils.go new file mode 100644 index 00000000000..52af4ad5523 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/utils/utils.go @@ -0,0 +1,45 @@ +package utils + +import ( + "errors" + "fmt" +) + +const ( + defaultPerPage = 30 + maxPerPage = 100 +) + +// NormalizeHeight normalizes a requested height against the current chain height. +// +// Semantics: +// - requestedHeight == 0 -> use latest height +// - requestedHeight < minVal -> error +// - requestedHeight > currentHeight -> error +func NormalizeHeight(latestHeight, requestedHeight, minVal int64) (int64, error) { + // 0 means unspecified -> latest + if requestedHeight == 0 { + return latestHeight, nil + } + + if requestedHeight < minVal { + return 0, fmt.Errorf("height must be greater than or equal to %d", minVal) + } + + if requestedHeight > latestHeight { + return 0, errors.New("height must be less than or equal to the current blockchain height") + } + + return requestedHeight, nil +} + +// ValidatePerPage normalizes the page result limit (pagination) +func ValidatePerPage(perPage int) int { + if perPage < 1 { + return defaultPerPage + } else if perPage > maxPerPage { + return maxPerPage + } + + return perPage +} diff --git a/tm2/pkg/bft/rpc/core/utils/utils_test.go b/tm2/pkg/bft/rpc/core/utils/utils_test.go new file mode 100644 index 00000000000..4a10b790074 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/utils/utils_test.go @@ -0,0 +1,47 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNormalizeHeight(t *testing.T) { + t.Parallel() + + t.Run("Zero height uses latest", func(t *testing.T) { + t.Parallel() + + height, err := NormalizeHeight(10, 0, 1) + require.NoError(t, err) + + assert.Equal(t, int64(10), height) + }) + + t.Run("Below minimum", func(t *testing.T) { + t.Parallel() + + _, err := NormalizeHeight(10, 1, 2) + require.Error(t, err) + + assert.Contains(t, err.Error(), "greater than or equal to 2") + }) + + t.Run("Above latest", func(t *testing.T) { + t.Parallel() + + _, err := NormalizeHeight(10, 11, 1) + require.Error(t, err) + + assert.Contains(t, err.Error(), "current blockchain height") + }) + + t.Run("Within range", func(t *testing.T) { + t.Parallel() + + height, err := NormalizeHeight(10, 7, 1) + require.NoError(t, err) + assert.Equal(t, int64(7), height) + }) +} diff --git a/tm2/pkg/bft/rpc/lib/server/conns/connection.go b/tm2/pkg/bft/rpc/lib/server/conns/connection.go new file mode 100644 index 00000000000..53a5585394c --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/conns/connection.go @@ -0,0 +1,25 @@ +package conns + +import ( + "github.com/olahol/melody" +) + +// ConnectionManager defines a connection manager interface +// for active WS connections +type ConnectionManager interface { + // AddWSConnection registers a new WS connection + AddWSConnection(id string, session *melody.Session) + + // RemoveWSConnection Removes the WS connection with the supplied ID + RemoveWSConnection(id string) + + // GetWSConnection fetches a WS connection, if any, using the supplied ID + GetWSConnection(id string) WSConnection +} + +// WSConnection represents a single WS connection +type WSConnection interface { + // WriteData pushes out data to the WS connection. + // Returns an error if the write failed (ex. 
connection closed) + WriteData(data any) error +} diff --git a/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go b/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go new file mode 100644 index 00000000000..f3f46f2a129 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go @@ -0,0 +1,97 @@ +package wsconn + +import ( + "context" + "fmt" + "log/slog" + "sync" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/ws" + "github.com/olahol/melody" +) + +// Conns manages active WS connections +type Conns struct { + logger *slog.Logger + conns map[string]Conn // ws connection ID -> conn + + mux sync.RWMutex +} + +// NewConns creates a new instance of the WS connection manager +func NewConns(logger *slog.Logger) *Conns { + return &Conns{ + logger: logger, + conns: make(map[string]Conn), + } +} + +// AddWSConnection registers a new WS connection +func (pw *Conns) AddWSConnection(id string, session *melody.Session) { + pw.mux.Lock() + defer pw.mux.Unlock() + + ctx, cancelFn := context.WithCancel(context.Background()) + + pw.conns[id] = Conn{ + ctx: ctx, + cancelFn: cancelFn, + writer: ws.New( + pw.logger.With( + "ws-conn", + fmt.Sprintf("ws-%s", id), + ), + session, + ), + } +} + +// RemoveWSConnection removes an existing WS connection +func (pw *Conns) RemoveWSConnection(id string) { + pw.mux.Lock() + defer pw.mux.Unlock() + + conn, found := pw.conns[id] + if !found { + return + } + + // Cancel the connection context + conn.cancelFn() + + delete(pw.conns, id) +} + +// GetWSConnection fetches a WS connection, if any +func (pw *Conns) GetWSConnection(id string) conns.WSConnection { + pw.mux.RLock() + defer pw.mux.RUnlock() + + conn, found := pw.conns[id] + if !found { + return nil + } + + return &conn +} + +// Conn is a single WS connection +type Conn struct { + ctx context.Context + cancelFn context.CancelFunc + + writer writer.ResponseWriter +} + +// WriteData writes arbitrary data to the WS connection +func (c *Conn) WriteData(data any) error { + if c.ctx.Err() != nil { + return c.ctx.Err() + } + + c.writer.WriteResponse(data) + + return nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/handler.go b/tm2/pkg/bft/rpc/lib/server/handler.go new file mode 100644 index 00000000000..7b371da7780 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/handler.go @@ -0,0 +1,35 @@ +package server + +import ( + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +// Handler executes a method with accompanying +// data such as metadata and params +type Handler func(metadata *metadata.Metadata, params []any) (any, *spec.BaseJSONError) + +type handlerEntry struct { + fn Handler + paramNames []string // index i == position i in the params +} + +type handlers map[string]*handlerEntry // method name -> handler entry + +// newHandlers creates a new map of method handlers +func newHandlers() handlers { + return make(handlers) +} + +// addHandler adds a new method handler for the specified method name +func (h handlers) addHandler(method string, handler Handler, paramNames ...string) { + h[method] = &handlerEntry{ + fn: handler, + paramNames: paramNames, + } +} + +// removeHandler removes the method handler for the specified method, if any +func (h handlers) removeHandler(method string) { + delete(h, method) +} diff --git a/tm2/pkg/bft/rpc/lib/server/handler_test.go b/tm2/pkg/bft/rpc/lib/server/handler_test.go 
new file mode 100644 index 00000000000..e1f8112b0b8 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/handler_test.go @@ -0,0 +1,180 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "testing" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/log" + "github.com/go-chi/chi/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func decodeResponse[T spec.BaseJSONResponse | spec.BaseJSONResponses](t *testing.T, responseBody []byte) *T { + t.Helper() + + var response *T + + require.NoError(t, json.NewDecoder(bytes.NewReader(responseBody)).Decode(&response)) + + return response +} + +// setupTestWebServer is a helper function for common setup logic +func setupTestWebServer(t *testing.T, callback func(s *JSONRPC)) *testWebServer { + t.Helper() + + s := newWebServer(t, callback) + s.start() + + return s +} + +// TestHTTP_Handle_BatchRequest verifies that the JSON-RPC server: +// - can handle a single HTTP request to a dummy endpoint +// - can handle a batch HTTP request to a dummy endpoint +func TestHTTP_Handle(t *testing.T) { + t.Parallel() + + var ( + commonResponse = "This is a common response!" + method = "dummy" + ) + + singleRequest, err := json.Marshal( + spec.NewJSONRequest(1, method, nil), + ) + require.NoError(t, err) + + requests := spec.BaseJSONRequests{ + spec.NewJSONRequest(1, method, nil), + spec.NewJSONRequest(2, method, nil), + spec.NewJSONRequest(3, method, nil), + } + + batchRequest, err := json.Marshal(requests) + require.NoError(t, err) + + testTable := []struct { + verifyResponse func(response []byte) error + name string + request []byte + }{ + { + func(resp []byte) error { + response := decodeResponse[spec.BaseJSONResponse](t, resp) + + assert.Equal(t, spec.NewJSONResponse(1, commonResponse, nil), response) + + return nil + }, + "single HTTP request", + singleRequest, + }, + { + func(resp []byte) error { + responses := decodeResponse[spec.BaseJSONResponses](t, resp) + + for index, response := range *responses { + assert.Equal( + t, + spec.NewJSONResponse(uint(index+1), commonResponse, nil), + response, + ) + } + + return nil + }, + "batch HTTP request", + batchRequest, + }, + } + + for _, testCase := range testTable { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // Create a new JSON-RPC server + webServer := setupTestWebServer(t, func(s *JSONRPC) { + s.handlers = make(handlers) + + s.handlers.addHandler(method, func(_ *metadata.Metadata, _ []any) (any, *spec.BaseJSONError) { + return commonResponse, nil + }) + }) + + defer webServer.stop() + + respRaw, err := http.Post( + webServer.address(), + jsonMimeType, + bytes.NewBuffer(testCase.request), + ) + if err != nil { + t.Fatalf("unexpected HTTP error, %v", err) + } + + resp, err := io.ReadAll(respRaw.Body) + if err != nil { + t.Fatalf("unable to read response body, %v", err) + } + + if err := testCase.verifyResponse(resp); err != nil { + t.Fatalf("unable to verify response, %v", err) + } + }) + } +} + +type testWebServer struct { + mux *chi.Mux + listener net.Listener +} + +func newWebServer(t *testing.T, callbacks ...func(s *JSONRPC)) *testWebServer { + t.Helper() + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unable to start listen, %v", err) + } + + mux := chi.NewMux() + webServer := &testWebServer{ + mux: mux, + listener: listener, + } + + s := 
NewJSONRPC(WithLogger(log.NewNoopLogger())) + + for _, callback := range callbacks { + callback(s) + } + + // Hook up the JSON-RPC server to the mux + mux.Mount("/", s.SetupRoutes(chi.NewMux())) + + return webServer +} + +func (ms *testWebServer) start() { + go func() { + //nolint:errcheck // No need to check error + _ = http.Serve(ms.listener, ms.mux) + }() +} + +func (ms *testWebServer) stop() { + _ = ms.listener.Close() +} + +func (ms *testWebServer) address() string { + return fmt.Sprintf("http://%s", ms.listener.Addr().String()) +} diff --git a/tm2/pkg/bft/rpc/lib/server/jsonrpc.go b/tm2/pkg/bft/rpc/lib/server/jsonrpc.go new file mode 100644 index 00000000000..76aa7ce9520 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/jsonrpc.go @@ -0,0 +1,437 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "sort" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns/wsconn" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + httpWriter "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/http" + wsWriter "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/ws" + "github.com/gnolang/gno/tm2/pkg/log" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + "github.com/google/uuid" + "github.com/olahol/melody" +) + +const ( + jsonMimeType = "application/json" // Only JSON is supported + maxRequestBodySize = 1 << 20 // 1MB + wsIDKey = "ws-id" // key used for WS connection metadata +) + +// maxSizeMiddleware enforces a 1MB size limit on the request body +func maxSizeMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize) + + next.ServeHTTP(w, r) + }) +} + +// JSONRPC is the JSONRPC server instance, that is capable +// of handling both HTTP and WS requests +type JSONRPC struct { + // wsConns keeps track of WS connections + // that need to be directly accessed by certain methods + wsConns conns.ConnectionManager + + logger *slog.Logger + + // handlers are the registered method handlers + handlers handlers + + // ws handles incoming and active WS connections + ws *melody.Melody +} + +// NewJSONRPC creates a new instance of the JSONRPC server +func NewJSONRPC(opts ...Option) *JSONRPC { + j := &JSONRPC{ + logger: log.NewNoopLogger(), + handlers: newHandlers(), + ws: melody.New(), + } + + for _, opt := range opts { + opt(j) + } + + // Set up the WS connection manager + j.wsConns = wsconn.NewConns(j.logger) + + // Set up the WS listeners + j.setupWSListeners() + + return j +} + +// SetupRoutes sets up the request router for the JSON-RPC service +func (j *JSONRPC) SetupRoutes(mux *chi.Mux) *chi.Mux { + // Set up the middlewares + mux.Use(middleware.AllowContentType(jsonMimeType)) + mux.Use(maxSizeMiddleware) + + // OPTIONS requests are ignored + mux.Options("/", func(http.ResponseWriter, *http.Request) {}) + + // Browser-friendly endpoints (GET) + mux.Get("/", j.handleIndexRequest) + mux.Get("/{method}", j.handleHTTPGetRequest) + + // Register the POST method handler for HTTP requests + mux.Post("/", j.handleHTTPRequest) + + // Register the WS method handler + mux.HandleFunc("/websocket", j.handleWSRequest) + + return mux +} + +// RegisterHandler registers a new method handler, +// overwriting existing ones, if any 
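+//
+// paramNames give the canonical positional order of the method's parameters; the HTTP GET
+// handler uses them to map query-string values onto positional params.
+//
+// Illustrative sketch (method names follow the legacy HTTP routes; mempoolHandler is a
+// hypothetical *mempool.Handler instance):
+//
+//	srv := NewJSONRPC()
+//	srv.RegisterHandler("health", health.HealthHandler)
+//	srv.RegisterHandler("unconfirmed_txs", mempoolHandler.UnconfirmedTxsHandler, "limit")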
+func (j *JSONRPC) RegisterHandler(method string, handler Handler, paramNames ...string) { + j.handlers.addHandler(method, handler, paramNames...) +} + +// UnregisterHandler removes the method handler for the specified method, if any +func (j *JSONRPC) UnregisterHandler(method string) { + j.handlers.removeHandler(method) +} + +// setupWSListeners sets up handlers for WS events +func (j *JSONRPC) setupWSListeners() { + // Set up the new connection handler + j.ws.HandleConnect(func(s *melody.Session) { + j.logger.Info( + "WS connection established", + "remote", s.RemoteAddr().String(), + ) + + // Generate the WS ID + wsID := uuid.NewString() + s.Set(wsIDKey, wsID) + + // Register the connection so it's queryable + j.wsConns.AddWSConnection(wsID, s) + }) + + // Set up the connection disconnect handler + j.ws.HandleDisconnect(func(s *melody.Session) { + j.logger.Info( + "WS connection terminated", + "remote", s.RemoteAddr().String(), + ) + + // Read the WS ID + wsIDRaw, _ := s.Get(wsIDKey) + wsConnID := wsIDRaw.(string) + + // Remove the WS connection + j.wsConns.RemoveWSConnection(wsConnID) + }) + + // Set up the core message method handler + j.ws.HandleMessage(func(s *melody.Session, msg []byte) { + // Extract the base request + requests, err := extractBaseRequests(msg) + if err != nil { + // Malformed requests are completely ignored + return + } + + // Get the ID associated with this active WS connection + wsIDRaw, _ := s.Get(wsIDKey) + wsConnID := wsIDRaw.(string) + + // Handle the request + j.handleRequest( + metadata.NewMetadata( + s.RemoteAddr().String(), + metadata.WithWebSocketID(wsConnID), + ), + wsWriter.New(j.logger, s), + requests, + ) + }) +} + +// handleHTTPRequest handles incoming HTTP requests +func (j *JSONRPC) handleHTTPRequest(w http.ResponseWriter, r *http.Request) { + requestBody, readErr := io.ReadAll(r.Body) + if readErr != nil { + http.Error( + w, + "unable to read request", + http.StatusBadRequest, + ) + + return + } + + requests, err := extractBaseRequests(requestBody) + if err != nil { + http.Error( + w, + "Invalid request body", + http.StatusBadRequest, + ) + + return + } + + // Handle the request + w.Header().Set("Content-Type", jsonMimeType) + j.handleRequest( + metadata.NewMetadata(r.RemoteAddr), + httpWriter.New(j.logger, w), + requests, + ) +} + +// handleWSRequest handles incoming WS requests +func (j *JSONRPC) handleWSRequest(w http.ResponseWriter, r *http.Request) { + if err := j.ws.HandleRequest(w, r); err != nil { + j.logger.Error( + "unable to initialize WS connection", + "err", err, + ) + } +} + +// handleRequest handles the specific requests with a +// custom response writer +func (j *JSONRPC) handleRequest( + metadata *metadata.Metadata, + writer writer.ResponseWriter, + requests spec.BaseJSONRequests, +) { + // Parse all JSON-RPC requests + responses := make(spec.BaseJSONResponses, len(requests)) + + for i, baseRequest := range requests { + // Log the request + j.logger.Debug( + "incoming request", + "request", baseRequest, + ) + + // Make sure it's a valid base request + if !isValidBaseRequest(baseRequest) { + // Marshal the JSON-RPC error + responses[i] = spec.NewJSONResponse( + baseRequest.ID, + nil, + spec.NewJSONError( + "invalid JSON-RPC 2.0 request", + spec.InvalidRequestErrorCode, + ), + ) + + continue + } + + // Run the method methodHandler + handleResp, handleErr := j.route(metadata, baseRequest) + if handleErr != nil { + j.logger.Debug( + "unable to handle JSON-RPC request", + "request", baseRequest, + "err", handleErr, + ) + + 
responses[i] = spec.NewJSONResponse( + baseRequest.ID, + nil, + handleErr, + ) + + continue + } + + j.logger.Debug( + "handled request", + "request", baseRequest, + ) + + responses[i] = spec.NewJSONResponse( + baseRequest.ID, + handleResp, + nil, + ) + } + + if len(responses) == 1 { + // Write the JSON response as a single response + writer.WriteResponse(responses[0]) + + return + } + + // Write the JSON response as a batch + writer.WriteResponse(responses) +} + +// route routes the base request to the appropriate handler +func (j *JSONRPC) route( + metadata *metadata.Metadata, + request *spec.BaseJSONRequest, +) (any, *spec.BaseJSONError) { + // Get the appropriate handler + entry, ok := j.handlers[request.Method] + if !ok { + return nil, spec.NewJSONError( + "Method handler not set", + spec.MethodNotFoundErrorCode, + ) + } + + return entry.fn(metadata, request.Params) +} + +// handleHTTPGetRequest parses the GET request, extracts the query params, and passes +// the JSON-RPC request on for further processing +func (j *JSONRPC) handleHTTPGetRequest(w http.ResponseWriter, r *http.Request) { + method := chi.URLParam(r, "method") + + entry, ok := j.handlers[method] + if !ok { + http.Error(w, "method not found", http.StatusNotFound) + + return + } + + q := r.URL.Query() + + // Query param order does not actually matter, but the ordering of + // the params for the POST handler does. Because of this, we build the + // params slice in the canonical order defined by the param names + params := make([]any, len(entry.paramNames)) + for i, name := range entry.paramNames { + val := q.Get(name) + if val == "" { + params[i] = nil + + continue + } + + params[i] = val + } + + baseReq := &spec.BaseJSONRequest{ + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: 0, + }, + Method: method, + Params: params, + } + + w.Header().Set("Content-Type", jsonMimeType) + + j.handleRequest( + metadata.NewMetadata(r.RemoteAddr), + httpWriter.New(j.logger, w), + spec.BaseJSONRequests{baseReq}, + ) +} + +// handleIndexRequest writes the list of available rpc endpoints as an HTML page +func (j *JSONRPC) handleIndexRequest(w http.ResponseWriter, r *http.Request) { + // Separate methods with and without args + noArgNames := make([]string, 0, len(j.handlers)) + argNames := make([]string, 0, len(j.handlers)) + + for name, entry := range j.handlers { + if len(entry.paramNames) == 0 { + noArgNames = append(noArgNames, name) + + continue + } + + argNames = append(argNames, name) + } + + sort.Strings(noArgNames) + sort.Strings(argNames) + + var buf bytes.Buffer + + buf.WriteString("") + buf.WriteString("
<br>Available endpoints:<br>") + + host := r.Host + + // Endpoints without arguments + for _, name := range noArgNames { + link := fmt.Sprintf("//%s/%s", host, name) + fmt.Fprintf(&buf, "<a href=\"%s\">%s</a><br>", link, link) + } + + buf.WriteString("<br>Endpoints that require arguments:<br>") + + // Endpoints with arguments + for _, name := range argNames { + entry := j.handlers[name] + + link := fmt.Sprintf("//%s/%s?", host, name) + for i, argName := range entry.paramNames { + link += argName + "=_" + + if i < len(entry.paramNames)-1 { + link += "&" + } + } + + fmt.Fprintf(&buf, "<a href=\"%s\">%s</a><br>
", link, link) + } + + buf.WriteString("") + + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(http.StatusOK) + + if _, err := buf.WriteTo(w); err != nil { + j.logger.Error("failed to write RPC endpoint index", "err", err) + } +} + +// isValidBaseRequest validates that the base JSON request is valid +func isValidBaseRequest(baseRequest *spec.BaseJSONRequest) bool { + if baseRequest.Method == "" { + return false + } + + return baseRequest.JSONRPC == spec.JSONRPCVersion +} + +// extractBaseRequests extracts the base JSON-RPC request from the +// request body +func extractBaseRequests(requestBody []byte) (spec.BaseJSONRequests, error) { + // Extract the request + var requests spec.BaseJSONRequests + + // Check if the request is a batch request + if err := json.Unmarshal(requestBody, &requests); err != nil { + // Try to get a single JSON-RPC request, since this is not a batch + var baseRequest *spec.BaseJSONRequest + if err := json.Unmarshal(requestBody, &baseRequest); err != nil { + return nil, err + } + + requests = spec.BaseJSONRequests{ + baseRequest, + } + } + + return requests, nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go b/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go new file mode 100644 index 00000000000..9752c1e3639 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go @@ -0,0 +1,26 @@ +package metadata + +// Metadata houses the active request metadata +type Metadata struct { + WebSocketID *string + RemoteAddr string +} + +// NewMetadata creates a new request metadata object +func NewMetadata(remoteAddr string, opts ...Option) *Metadata { + m := &Metadata{ + RemoteAddr: remoteAddr, + } + + for _, opt := range opts { + opt(m) + } + + return m +} + +// IsWS returns a flag indicating if the request +// belongs to a WS connection +func (m *Metadata) IsWS() bool { + return m.WebSocketID != nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go b/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go new file mode 100644 index 00000000000..8b3ff3c29e8 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go @@ -0,0 +1,38 @@ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMetadata_NewMetadata(t *testing.T) { + t.Parallel() + + t.Run("HTTP metadata", func(t *testing.T) { + t.Parallel() + + address := "remote address" + m := NewMetadata(address) + + require.NotNil(t, m) + + assert.Equal(t, address, m.RemoteAddr) + assert.False(t, m.IsWS()) + }) + + t.Run("WS metadata", func(t *testing.T) { + t.Parallel() + + address := "remote address" + wsID := "ws ID" + m := NewMetadata(address, WithWebSocketID(wsID)) + + require.NotNil(t, m) + + assert.Equal(t, address, m.RemoteAddr) + assert.True(t, m.IsWS()) + assert.Equal(t, wsID, *m.WebSocketID) + }) +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/options.go b/tm2/pkg/bft/rpc/lib/server/metadata/options.go new file mode 100644 index 00000000000..af1f9627cbb --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/options.go @@ -0,0 +1,11 @@ +package metadata + +type Option func(m *Metadata) + +// WithWebSocketID sets the WS connection ID +// for the connection metadata +func WithWebSocketID(id string) Option { + return func(m *Metadata) { + m.WebSocketID = &id + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/options.go b/tm2/pkg/bft/rpc/lib/server/options.go new file mode 100644 index 00000000000..22658069896 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/options.go @@ -0,0 +1,13 
@@ +package server + +import "log/slog" + +type Option func(s *JSONRPC) + +// WithLogger sets the logger to be used +// with the JSON-RPC server +func WithLogger(logger *slog.Logger) Option { + return func(s *JSONRPC) { + s.logger = logger + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/server.go b/tm2/pkg/bft/rpc/lib/server/server.go new file mode 100644 index 00000000000..dee3a83939b --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/server.go @@ -0,0 +1,69 @@ +package server + +import ( + "context" + "errors" + "log/slog" + "net" + "net/http" + "time" + + "golang.org/x/sync/errgroup" +) + +const DefaultListenAddress = "0.0.0.0:26657" // TODO move? + +type Server struct { + h http.Handler + logger *slog.Logger + addr string +} + +func NewHTTPServer(h http.Handler, addr string, logger *slog.Logger) *Server { + return &Server{h: h, addr: addr, logger: logger} +} + +// Serve serves the JSON-RPC server +func (s *Server) Serve(ctx context.Context) error { + srv := &http.Server{ + Addr: s.addr, + Handler: s.h, + ReadHeaderTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + } + + group, gCtx := errgroup.WithContext(ctx) + + group.Go(func() error { + defer s.logger.Info("RPC server shut down") + + ln, err := net.Listen("tcp", srv.Addr) + if err != nil { + return err + } + + s.logger.Info( + "RPC server started", + "address", ln.Addr().String(), + ) + + if err = srv.Serve(ln); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + return nil + }) + + group.Go(func() error { + <-gCtx.Done() + + s.logger.Info("RPC server to be shut down") + + wsCtx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + return srv.Shutdown(wsCtx) + }) + + return group.Wait() +} diff --git a/tm2/pkg/bft/rpc/lib/server/spec/errors.go b/tm2/pkg/bft/rpc/lib/server/spec/errors.go new file mode 100644 index 00000000000..cc521127c2d --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/spec/errors.go @@ -0,0 +1,9 @@ +package spec + +const ( + ParseErrorCode int = -32700 + InvalidParamsErrorCode int = -32602 + MethodNotFoundErrorCode int = -32601 + InvalidRequestErrorCode int = -32600 + ServerErrorCode int = -32000 +) diff --git a/tm2/pkg/bft/rpc/lib/server/spec/spec.go b/tm2/pkg/bft/rpc/lib/server/spec/spec.go new file mode 100644 index 00000000000..0ec718c629d --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/spec/spec.go @@ -0,0 +1,140 @@ +package spec + +import ( + "encoding/json" + "fmt" +) + +const JSONRPCVersion = "2.0" + +// BaseJSON defines the base JSON fields +// all JSON-RPC requests and responses need to have +type BaseJSON struct { + JSONRPC string `json:"jsonrpc"` + ID uint `json:"id,omitempty"` // TODO support string IDs +} + +// BaseJSONRequest defines the base JSON request format +type BaseJSONRequest struct { + BaseJSON + + Method string `json:"method"` + Params []any `json:"params"` +} + +// BaseJSONRequests represents a batch of JSON-RPC requests +type BaseJSONRequests []*BaseJSONRequest + +// BaseJSONResponses represents a batch of JSON-RPC responses +type BaseJSONResponses []*BaseJSONResponse + +// BaseJSONResponse defines the base JSON response format +type BaseJSONResponse struct { + Result any `json:"result"` + Error *BaseJSONError `json:"error,omitempty"` + BaseJSON +} + +// BaseJSONError defines the base JSON response error format +type BaseJSONError struct { + Data any `json:"data,omitempty"` + Message string `json:"message"` + Code int `json:"code"` +} + +// NewJSONRequest creates a new JSON-RPC request +func NewJSONRequest( + id uint, + method 
string, + params []any, +) *BaseJSONRequest { + return &BaseJSONRequest{ + BaseJSON: BaseJSON{ + ID: id, + JSONRPC: JSONRPCVersion, + }, + Method: method, + Params: params, + } +} + +// NewJSONResponse creates a new JSON-RPC response +func NewJSONResponse( + id uint, + result any, + err *BaseJSONError, +) *BaseJSONResponse { + return &BaseJSONResponse{ + BaseJSON: BaseJSON{ + ID: id, + JSONRPC: JSONRPCVersion, + }, + Result: result, + Error: err, + } +} + +// NewJSONError creates a new JSON-RPC error +func NewJSONError(message string, code int) *BaseJSONError { + return &BaseJSONError{ + Code: code, + Message: message, + } +} + +// GenerateResponseError generates the JSON-RPC server error response +func GenerateResponseError(err error) *BaseJSONError { + return NewJSONError(err.Error(), ServerErrorCode) +} + +// GenerateInvalidParamError generates the JSON-RPC invalid param error response +func GenerateInvalidParamError(index int) *BaseJSONError { + return NewJSONError( + fmt.Sprintf( + "Invalid %s parameter", + getOrdinalSuffix(index), + ), + InvalidParamsErrorCode, + ) +} + +func getOrdinalSuffix(num int) string { + switch num % 10 { + case 1: + if num%100 != 11 { + return fmt.Sprintf("%d%s", num, "st") + } + case 2: + if num%100 != 12 { + return fmt.Sprintf("%d%s", num, "nd") + } + case 3: + if num%100 != 13 { + return fmt.Sprintf("%d%s", num, "rd") + } + } + + return fmt.Sprintf("%d%s", num, "th") +} + +// GenerateInvalidParamCountError generates the JSON-RPC invalid param count error +func GenerateInvalidParamCountError() *BaseJSONError { + return NewJSONError( + "Invalid number of parameters", + InvalidParamsErrorCode, + ) +} + +func ParseObjectParameter[T any](param any, data *T) error { + marshaled, err := json.Marshal(param) + if err != nil { + return err + } + + err = json.Unmarshal(marshaled, data) + if err != nil { + return err + } + + return nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/http/http.go b/tm2/pkg/bft/rpc/lib/server/writer/http/http.go new file mode 100644 index 00000000000..545d864c24a --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/http/http.go @@ -0,0 +1,34 @@ +package http + +import ( + "encoding/json" + "log/slog" + "net/http" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" +) + +var _ writer.ResponseWriter = (*ResponseWriter)(nil) + +type ResponseWriter struct { + logger *slog.Logger + + w http.ResponseWriter +} + +func New(logger *slog.Logger, w http.ResponseWriter) ResponseWriter { + return ResponseWriter{ + logger: logger.With("writer", "http-writer"), + w: w, + } +} + +func (h ResponseWriter) WriteResponse(response any) { + // TODO use amino encoding + if err := json.NewEncoder(h.w).Encode(response); err != nil { + h.logger.Error( + "unable to encode JSON response", + "err", err, + ) + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/writer.go b/tm2/pkg/bft/rpc/lib/server/writer/writer.go new file mode 100644 index 00000000000..d86a25e2359 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/writer.go @@ -0,0 +1,9 @@ +package writer + +// ResponseWriter outlines the interface any +// JSON-RPC response writer needs to implement +type ResponseWriter interface { + // WriteResponse takes in the JSON-RPC response + // which is either a single object, or a batch + WriteResponse(response any) +} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go b/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go new file mode 100644 index 00000000000..7cbbf94d1f6 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go @@ -0,0 +1,45 @@ 
+package ws + +import ( + "encoding/json" + "log/slog" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + "github.com/olahol/melody" +) + +var _ writer.ResponseWriter = (*ResponseWriter)(nil) + +type ResponseWriter struct { + logger *slog.Logger + + s *melody.Session +} + +func New(logger *slog.Logger, s *melody.Session) ResponseWriter { + return ResponseWriter{ + logger: logger.With("writer", "ws-writer"), + s: s, + } +} + +func (w ResponseWriter) WriteResponse(response any) { + // TODO use amino encoding + + jsonRaw, encodeErr := json.Marshal(response) + if encodeErr != nil { + w.logger.Error( + "unable to encode JSON-RPC response", + "err", encodeErr, + ) + + return + } + + if err := w.s.Write(jsonRaw); err != nil { + w.logger.Error( + "unable to write WS response", + "err", err, + ) + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/handlers.go b/tm2/pkg/bft/rpc/lib/server_old/handlers.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/handlers.go rename to tm2/pkg/bft/rpc/lib/server_old/handlers.go diff --git a/tm2/pkg/bft/rpc/lib/server/handlers_test.go b/tm2/pkg/bft/rpc/lib/server_old/handlers_test.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/handlers_test.go rename to tm2/pkg/bft/rpc/lib/server_old/handlers_test.go diff --git a/tm2/pkg/bft/rpc/lib/server/http_params.go b/tm2/pkg/bft/rpc/lib/server_old/http_params.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/http_params.go rename to tm2/pkg/bft/rpc/lib/server_old/http_params.go diff --git a/tm2/pkg/bft/rpc/lib/server/http_server.go b/tm2/pkg/bft/rpc/lib/server_old/http_server.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/http_server.go rename to tm2/pkg/bft/rpc/lib/server_old/http_server.go diff --git a/tm2/pkg/bft/rpc/lib/server/http_server_test.go b/tm2/pkg/bft/rpc/lib/server_old/http_server_test.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/http_server_test.go rename to tm2/pkg/bft/rpc/lib/server_old/http_server_test.go diff --git a/tm2/pkg/bft/rpc/lib/server/parse_test.go b/tm2/pkg/bft/rpc/lib/server_old/parse_test.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/parse_test.go rename to tm2/pkg/bft/rpc/lib/server_old/parse_test.go diff --git a/tm2/pkg/bft/rpc/lib/server/test.crt b/tm2/pkg/bft/rpc/lib/server_old/test.crt similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/test.crt rename to tm2/pkg/bft/rpc/lib/server_old/test.crt diff --git a/tm2/pkg/bft/rpc/lib/server/test.key b/tm2/pkg/bft/rpc/lib/server_old/test.key similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/test.key rename to tm2/pkg/bft/rpc/lib/server_old/test.key diff --git a/tm2/pkg/bft/rpc/lib/server/write_endpoints_test.go b/tm2/pkg/bft/rpc/lib/server_old/write_endpoints_test.go similarity index 100% rename from tm2/pkg/bft/rpc/lib/server/write_endpoints_test.go rename to tm2/pkg/bft/rpc/lib/server_old/write_endpoints_test.go
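Not part of the diff above: a minimal wiring sketch showing how the new pieces could fit together end to end. It assumes the exported `Handler` type has the same signature as the handler used in `handler_test.go` (`func(*metadata.Metadata, []any) (any, *spec.BaseJSONError)`); the `echo` method and the `message` parameter name are illustrative only, not part of the change.

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	rpcserver "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
	"github.com/go-chi/chi/v5"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// Create the JSON-RPC server; the logger is injected via the Option added in options.go.
	j := rpcserver.NewJSONRPC(rpcserver.WithLogger(logger))

	// Register a hypothetical "echo" method. The param name "message" is what the
	// browser-friendly GET route uses to build the canonical params slice.
	j.RegisterHandler("echo", func(_ *metadata.Metadata, params []any) (any, *spec.BaseJSONError) {
		if len(params) != 1 {
			return nil, spec.GenerateInvalidParamCountError()
		}

		return fmt.Sprintf("echo: %v", params[0]), nil
	}, "message")

	// Mount the JSON-RPC routes on a chi mux, mirroring the setup in handler_test.go.
	mux := chi.NewMux()
	mux.Mount("/", j.SetupRoutes(chi.NewMux()))

	// Serve over HTTP using the new graceful Server wrapper from server.go.
	srv := rpcserver.NewHTTPServer(mux, rpcserver.DefaultListenAddress, logger)
	if err := srv.Serve(context.Background()); err != nil {
		logger.Error("RPC server stopped", "err", err)
	}
}
```

With the param names registered, the GET route added in `SetupRoutes` resolves a request such as `//host/echo?message=hello` to the same handler, building the params slice in the declared order, while POST requests to `/` accept both single and batch JSON-RPC payloads, as exercised in `handler_test.go`; WS clients connect via `/websocket` and are tracked by the connection manager.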