diff --git a/clientv3/balancer/grpc1.7-health_test.go b/clientv3/balancer/grpc1.7-health_test.go
index 6ea7e0d6abf9..bb4f7b8a861a 100644
--- a/clientv3/balancer/grpc1.7-health_test.go
+++ b/clientv3/balancer/grpc1.7-health_test.go
@@ -191,7 +191,7 @@ func TestOldHealthBalancerGraylist(t *testing.T) {
 
 // TestBalancerDoNotBlockOnClose ensures that balancer and grpc don't deadlock each other
 // due to rapid open/close conn. The deadlock causes balancer.Close() to block forever.
-// See issue: https://go.etcd.io/etcd/issues/7283 for more detail.
+// See issue: https://github.com/etcd-io/etcd/issues/7283 for more detail.
 func TestOldHealthBalancerDoNotBlockOnClose(t *testing.T) {
 	defer testutil.AfterTest(t)
 
diff --git a/clientv3/doc.go b/clientv3/doc.go
index 649471774d27..01a3f5961a7e 100644
--- a/clientv3/doc.go
+++ b/clientv3/doc.go
@@ -61,7 +61,7 @@
 //
 // 1. context error: canceled or deadline exceeded.
 // 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-// 3. gRPC error: see https://go.etcd.io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
+// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
 //
 // Here is the example code to handle client errors:
 //
diff --git a/clientv3/lease.go b/clientv3/lease.go
index 380de02a882a..c2796fc969af 100644
--- a/clientv3/lease.go
+++ b/clientv3/lease.go
@@ -132,7 +132,7 @@ type Lease interface {
 	// given context "ctx" is canceled or timed out.
 	//
 	// TODO(v4.0): post errors to last keep alive message before closing
-	// (see https://go.etcd.io/etcd/pull/7866)
+	// (see https://github.com/etcd-io/etcd/pull/7866)
 	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
 
 	// KeepAliveOnce renews the lease once. The response corresponds to the
diff --git a/clientv3/watch.go b/clientv3/watch.go
index 9c677ac7ba1d..8ec58bb14695 100644
--- a/clientv3/watch.go
+++ b/clientv3/watch.go
@@ -68,7 +68,7 @@ type Watcher interface {
 	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
 	// Currently, client contexts are overwritten with "valCtx" that never closes.
 	// TODO(v3.4): configure watch retry policy, limit maximum retry number
-	// (see https://go.etcd.io/etcd/issues/8980)
+	// (see https://github.com/etcd-io/etcd/issues/8980)
 	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
 
 	// RequestProgress requests a progress notify response be sent in all watch channels.
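A note on the clientv3/doc.go hunk above: the three error classes it enumerates can be told apart roughly as in the sketch below. This is a minimal illustration for orientation only, not code from this patch; it assumes the go.etcd.io/etcd import path, a placeholder endpoint, and an empty-key Put to provoke a server-rejected request.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err = cli.Put(ctx, "", "") // empty key is rejected server-side
	cancel()

	switch {
	case err == context.Canceled:
		// 1. context error: canceled by another routine
	case err == context.DeadlineExceeded:
		// 1. context error: client-side deadline exceeded
	case err == rpctypes.ErrEmptyKey:
		// 3. gRPC error defined in rpctypes (the error.go linked in the hunk)
	default:
		if ev, ok := status.FromError(err); ok && ev.Code() == codes.DeadlineExceeded {
			// 2. gRPC status error: server-side deadline hit first,
			// e.g. when the server clock drifts ahead of the client's
		}
	}
}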
diff --git a/embed/config.go b/embed/config.go
index 00e59ea18f97..a7d50d04227a 100644
--- a/embed/config.go
+++ b/embed/config.go
@@ -162,7 +162,7 @@ type Config struct {
 	//
 	// If single-node, it advances ticks regardless.
 	//
-	// See https://go.etcd.io/etcd/issues/9333 for more detail.
+	// See https://github.com/etcd-io/etcd/issues/9333 for more detail.
 	InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
 
 	QuotaBackendBytes int64 `json:"quota-backend-bytes"`
@@ -246,7 +246,7 @@ type Config struct {
 	// CVE-2018-5702 reference:
 	// - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2
 	// - https://github.com/transmission/transmission/pull/468
-	// - https://go.etcd.io/etcd/issues/9353
+	// - https://github.com/etcd-io/etcd/issues/9353
 	HostWhitelist map[string]struct{}
 
 	// UserHandlers is for registering users handlers and only used for
diff --git a/embed/etcd.go b/embed/etcd.go
index 88086d2ae992..e4e17fb2d80f 100644
--- a/embed/etcd.go
+++ b/embed/etcd.go
@@ -412,7 +412,7 @@ func stopServers(ctx context.Context, ss *servers) {
 
 	// do not grpc.Server.GracefulStop with TLS enabled etcd server
 	// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
-	// and https://go.etcd.io/etcd/issues/8916
+	// and https://github.com/etcd-io/etcd/issues/8916
 	if ss.secure {
 		shutdownNow()
 		return
diff --git a/etcdctl/ctlv2/command/member_commands.go b/etcdctl/ctlv2/command/member_commands.go
index 4ed78cc8dec3..84a9634c541a 100644
--- a/etcdctl/ctlv2/command/member_commands.go
+++ b/etcdctl/ctlv2/command/member_commands.go
@@ -164,7 +164,7 @@ func actionMemberRemove(c *cli.Context) error {
 		if m.Name == removalID {
 			// Note that, so long as it's not ambiguous, we *could* do the right thing by name here.
 			fmt.Fprintf(os.Stderr, "Found a member named %s; if this is correct, please use its ID, eg:\n\tetcdctl member remove %s\n", m.Name, m.ID)
-			fmt.Fprintf(os.Stderr, "For more details, read the documentation at https://go.etcd.io/etcd/blob/master/Documentation/runtime-configuration.md#remove-a-member\n\n")
+			fmt.Fprintf(os.Stderr, "For more details, read the documentation at https://github.com/etcd-io/etcd/blob/master/Documentation/runtime-configuration.md#remove-a-member\n\n")
 		}
 	}
 	if !foundID {
diff --git a/etcdctl/ctlv3/command/global.go b/etcdctl/ctlv3/command/global.go
index 101a2d20976d..eb90f2a4fac8 100644
--- a/etcdctl/ctlv3/command/global.go
+++ b/etcdctl/ctlv3/command/global.go
@@ -135,7 +135,7 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientConfig {
 		// WARNING logs contain important information like TLS misconfirugation, but spams
 		// too many routine connection disconnects to turn on by default.
 		//
-		// See https://go.etcd.io/etcd/pull/9623 for background
+		// See https://github.com/etcd-io/etcd/pull/9623 for background
 		clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr))
 	}
 
diff --git a/etcdserver/api/membership/cluster.go b/etcdserver/api/membership/cluster.go
index e0f3caacc735..6f4b96a44f15 100644
--- a/etcdserver/api/membership/cluster.go
+++ b/etcdserver/api/membership/cluster.go
@@ -516,7 +516,7 @@ func (c *RaftCluster) IsReadyToAddNewMember() bool {
 
 	if nstarted == 1 && nmembers == 2 {
 		// a case of adding a new node to 1-member cluster for restoring cluster data
-		// https://go.etcd.io/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
+		// https://github.com/etcd-io/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
		if c.lg != nil {
			c.lg.Debug("number of started member is 1; can accept add member request")
		} else {
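On the embed/etcd.go hunk above ("do not grpc.Server.GracefulStop with TLS enabled etcd server"): the general pattern of bounding a graceful gRPC shutdown and falling back to a hard stop looks roughly like the sketch below. This is an assumption-laden illustration of the technique, not etcd's actual stopServers code; the shutdown budget is an arbitrary placeholder.

package main

import (
	"time"

	"google.golang.org/grpc"
)

// stopWithDeadline bounds GracefulStop, which can block indefinitely while
// clients hold streams open (worse with TLS, per the issues linked above),
// then falls back to Stop to force-close whatever remains.
func stopWithDeadline(srv *grpc.Server, d time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop() // waits for in-flight RPCs to drain
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(d):
		srv.Stop() // hard stop: closes listeners and live connections
	}
}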
diff --git a/etcdserver/config.go b/etcdserver/config.go
index 143488bbe3a8..7ce88cfaf3bc 100644
--- a/etcdserver/config.go
+++ b/etcdserver/config.go
@@ -96,7 +96,7 @@ type ServerConfig struct {
 	//
 	// If single-node, it advances ticks regardless.
 	//
-	// See https://go.etcd.io/etcd/issues/9333 for more detail.
+	// See https://github.com/etcd-io/etcd/issues/9333 for more detail.
 	InitialElectionTickAdvance bool
 
 	BootstrapTimeout time.Duration
diff --git a/functional/tester/case_network_delay.go b/functional/tester/case_network_delay.go
index ec4e9ccca2d4..60da43378ba8 100644
--- a/functional/tester/case_network_delay.go
+++ b/functional/tester/case_network_delay.go
@@ -26,7 +26,7 @@ const (
 	// Wait more when it recovers from slow network, because network layer
 	// needs extra time to propagate traffic control (tc command) change.
 	// Otherwise, we get different hash values from the previous revision.
-	// For more detail, please see https://go.etcd.io/etcd/issues/5121.
+	// For more detail, please see https://github.com/etcd-io/etcd/issues/5121.
 	waitRecover = 5 * time.Second
 )
 
diff --git a/functional/tester/cluster.go b/functional/tester/cluster.go
index fe52e355df2f..6dfb3085bbb2 100644
--- a/functional/tester/cluster.go
+++ b/functional/tester/cluster.go
@@ -593,7 +593,7 @@ func (clus *Cluster) WaitHealth() error {
 	// wait 60s to check cluster health.
 	// TODO: set it to a reasonable value. It is set that high because
 	// follower may use long time to catch up the leader when reboot under
-	// reasonable workload (https://go.etcd.io/etcd/issues/2698)
+	// reasonable workload (https://github.com/etcd-io/etcd/issues/2698)
 	for i := 0; i < 60; i++ {
 		for _, m := range clus.Members {
 			if err = m.WriteHealthKey(); err != nil {
diff --git a/integration/cluster_test.go b/integration/cluster_test.go
index 113e2262dc2e..c1681da69ecc 100644
--- a/integration/cluster_test.go
+++ b/integration/cluster_test.go
@@ -450,7 +450,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {
 
 // TestRestartRemoved ensures that restarting removed member must exit
 // if 'initial-cluster-state' is set 'new' and old data directory still exists
-// (see https://go.etcd.io/etcd/issues/7512 for more).
+// (see https://github.com/etcd-io/etcd/issues/7512 for more).
 func TestRestartRemoved(t *testing.T) {
 	defer testutil.AfterTest(t)
 
diff --git a/integration/v3_election_test.go b/integration/v3_election_test.go
index ea6d8ae9916c..4598448001b9 100644
--- a/integration/v3_election_test.go
+++ b/integration/v3_election_test.go
@@ -201,7 +201,7 @@ func TestElectionSessionRecampaign(t *testing.T) {
 // TestElectionOnPrefixOfExistingKey checks that a single
 // candidate can be elected on a new key that is a prefix
 // of an existing key. To wit, check for regression
-// of bug #6278. https://go.etcd.io/etcd/issues/6278
+// of bug #6278. https://github.com/etcd-io/etcd/issues/6278
 //
 func TestElectionOnPrefixOfExistingKey(t *testing.T) {
 	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
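For context on the v3_election_test.go hunk above, a leader election through the clientv3/concurrency package looks roughly as follows. A hedged sketch: the election prefix and candidate value are placeholders, and the client is assumed to be connected already.

package main

import (
	"context"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func campaign(cli *clientv3.Client) error {
	// A session holds a lease that backs this candidate's key.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	// Candidate keys are created under the "my-election" prefix (placeholder).
	e := concurrency.NewElection(s, "my-election")

	// Campaign blocks until this candidate becomes leader or ctx ends.
	return e.Campaign(context.Background(), "candidate-1")
}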
diff --git a/integration/v3_grpc_inflight_test.go b/integration/v3_grpc_inflight_test.go
index 4b76513fa49b..1a0a11a8e1ba 100644
--- a/integration/v3_grpc_inflight_test.go
+++ b/integration/v3_grpc_inflight_test.go
@@ -60,7 +60,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
 // TestV3KVInflightRangeRequests ensures that inflight requests
 // (sent before server shutdown) are gracefully handled by server-side.
 // They are either finished or canceled, but never crash the backend.
-// See https://go.etcd.io/etcd/issues/7322 for more detail.
+// See https://github.com/etcd-io/etcd/issues/7322 for more detail.
 func TestV3KVInflightRangeRequests(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
diff --git a/integration/v3_lease_test.go b/integration/v3_lease_test.go
index 86b6f8d99985..7b378f0c92f6 100644
--- a/integration/v3_lease_test.go
+++ b/integration/v3_lease_test.go
@@ -337,14 +337,14 @@ func TestV3LeaseLeases(t *testing.T) {
 
 // TestV3LeaseRenewStress keeps creating lease and renewing it immediately to ensure the renewal goes through.
 // it was oberserved that the immediate lease renewal after granting a lease from follower resulted lease not found.
-// related issue https://go.etcd.io/etcd/issues/6978
+// related issue https://github.com/etcd-io/etcd/issues/6978
 func TestV3LeaseRenewStress(t *testing.T) {
 	testLeaseStress(t, stressLeaseRenew)
 }
 
 // TestV3LeaseTimeToLiveStress keeps creating lease and retrieving it immediately to ensure the lease can be retrieved.
 // it was oberserved that the immediate lease retrieval after granting a lease from follower resulted lease not found.
-// related issue https://go.etcd.io/etcd/issues/6978
+// related issue https://github.com/etcd-io/etcd/issues/6978
 func TestV3LeaseTimeToLiveStress(t *testing.T) {
 	testLeaseStress(t, stressLeaseTimeToLive)
 }
@@ -437,7 +437,7 @@ func TestV3PutOnNonExistLease(t *testing.T) {
 }
 
 // TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic
-// related issue https://go.etcd.io/etcd/issues/6537
+// related issue https://github.com/etcd-io/etcd/issues/6537
 func TestV3GetNonExistLease(t *testing.T) {
 	defer testutil.AfterTest(t)
 	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
diff --git a/pkg/proxy/server.go b/pkg/proxy/server.go
index 911c1a5cd223..6e037ec5d18e 100644
--- a/pkg/proxy/server.go
+++ b/pkg/proxy/server.go
@@ -289,8 +289,8 @@ func (s *server) To() string {
 
 // TODO: implement packet reordering from multiple TCP connections
 // buffer packets per connection for awhile, reorder before transmit
-// - https://go.etcd.io/etcd/issues/5614
-// - https://go.etcd.io/etcd/pull/6918#issuecomment-264093034
+// - https://github.com/etcd-io/etcd/issues/5614
+// - https://github.com/etcd-io/etcd/pull/6918#issuecomment-264093034
 func (s *server) listenAndServe() {
 	defer s.closeWg.Done()
 
diff --git a/raft/raft_test.go b/raft/raft_test.go
index 664c5addb7ee..f05543aaa8df 100644
--- a/raft/raft_test.go
+++ b/raft/raft_test.go
@@ -1524,7 +1524,7 @@ func TestHandleHeartbeatResp(t *testing.T) {
 
 // TestRaftFreesReadOnlyMem ensures raft will free read request from
 // readOnly readIndexQueue and pendingReadIndex map.
-// related issue: https://go.etcd.io/etcd/issues/7571
+// related issue: https://github.com/etcd-io/etcd/issues/7571
 func TestRaftFreesReadOnlyMem(t *testing.T) {
 	sm := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())
 	sm.becomeCandidate()
diff --git a/tools/etcd-dump-db/backend.go b/tools/etcd-dump-db/backend.go
index c8f5e831f7ba..8b06a7b0d2cf 100644
--- a/tools/etcd-dump-db/backend.go
+++ b/tools/etcd-dump-db/backend.go
@@ -111,7 +111,7 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error)
 	// iterate in reverse order (use First() and Next() for ascending order)
 	for k, v := c.Last(); k != nil; k, v = c.Prev() {
 		// TODO: remove sensitive information
-		// (https://go.etcd.io/etcd/issues/7620)
+		// (https://github.com/etcd-io/etcd/issues/7620)
 		if dec, ok := decoders[bucket]; decode && ok {
 			dec(k, v)
 		} else {
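Closing note on the tools/etcd-dump-db hunk: the reverse bucket walk it patches is the standard bolt cursor pattern, sketched below under assumptions. The go.etcd.io/bbolt import path and the bucket/path arguments are placeholders; etcd's backend file normally sits at <data-dir>/member/snap/db.

package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// dumpBucketReverse walks a bucket from last key to first, mirroring the
// cursor loop in iterateBucket above. Use First()/Next() for ascending order.
func dumpBucketReverse(dbPath, bucket string) error {
	db, err := bolt.Open(dbPath, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer db.Close()

	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(bucket))
		if b == nil {
			return fmt.Errorf("bucket %q not found", bucket)
		}
		c := b.Cursor()
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("key=%q, value=%d bytes\n", k, len(v))
		}
		return nil
	})
}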