Golang实践:分布式锁
发布于 2021-02-09 · 本文总共 3158 字 · 阅读大约需要
10 分钟
并发修改全局变量
代码
package redis_go
import (
"fmt"
"sync"
)
var counter int
// CounterInr increments the package-level counter from 1000 goroutines
// WITHOUT any synchronization, then prints the result.
//
// The data race on counter is intentional: this block exists to show
// that the unsynchronized read-modify-write (counter++) loses updates,
// so the printed value is usually below 1000 and differs between runs.
//
// NOTE(review): counter is package-level and never reset here, so
// successive calls should print accumulating values; the sample output
// below (910, 863, ...) suggests a function-local counter was actually
// used when it was captured — verify.
func CounterInr() {
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Racy: load, add, store — increments from other goroutines
			// interleaved between the load and the store are lost.
			counter++
		}()
	}
	wg.Wait()
	fmt.Println(counter)
}
测试
redis_go/counter_test.go
package redis_go
import "testing"
func TestCounterInr(t *testing.T) {
for i := 0; i < 10; i++{
CounterInr()
}
}
// === RUN TestCounterInr
// 910
// 863
// 912
// 884
// 865
// 865
// 882
// 800
// 836
// 864
// --- PASS: TestCounterInr (0.01s)
// PASS
由于 counter++ 并非原子操作(它是"读取—加一—写回"三步),1000 个 goroutine 在无同步的情况下并发执行时部分增量会互相覆盖而丢失,因此多次运行会得到不同的、且通常小于 1000 的结果
加锁
代码
package redis_go
import (
"fmt"
"sync"
)
// CounterInrWithLock runs 1000 goroutines that each increment a local
// counter while holding a sync.Mutex, then prints the final value.
// Because every increment happens inside the critical section, no
// update is lost and the printed value is always 1000.
func CounterInrWithLock() {
	var (
		n  int
		wg sync.WaitGroup
		mu sync.Mutex
	)
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			defer mu.Unlock()
			n++
		}()
	}
	wg.Wait()
	fmt.Println(n)
}
测试
package redis_go
import "testing"
func TestCounterInrWithLock(t *testing.T) {
for i := 0; i < 10; i++{
CounterInrWithLock()
}
}
// === RUN TestCounterInrWithLock
// 1000
// 1000
// 1000
// 1000
// 1000
// 1000
// 1000
// 1000
// 1000
// 1000
// --- PASS: TestCounterInrWithLock (0.01s)
// PASS
Redis
代码
package redis_go
import (
"fmt"
"github.com/go-redis/redis"
"sync"
"time"
)
// Incr tries to acquire a simple distributed lock via SETNX (with a 5s
// expiry as a crash safety net), increments the "counter" key, and
// releases the lock by deleting the lock key. Callers that lose the
// SETNX race just print a message and return — there is no retry.
//
// BUG FIX vs. the original: the counter was incremented and written
// back only when Get FAILED (if err != nil { counterValue++ ... }), so
// a successfully read value was never incremented. We now treat
// redis.Nil (missing key) as zero, abort on any other error, and always
// increment on the happy path. The unlock is also deferred so the lock
// is released on every path once it is held.
func Incr() {
	client := redis.NewClient(
		&redis.Options{
			Addr:     "localhost:6379",
			Password: "",
			DB:       0,
		},
	)
	lockKey := "counter_lock"
	counterKey := "counter"

	resp := client.SetNX(lockKey, 1, time.Second*5)
	lockSuccess, err := resp.Result()
	if err != nil || !lockSuccess {
		fmt.Printf("Lock failed: %+v, %+v\n", lockSuccess, err)
		return
	}
	// Lock is held from here on: release it no matter how we exit.
	defer func() {
		unlockResult, err := client.Del(lockKey).Result()
		if err == nil && unlockResult > 0 {
			fmt.Printf("Unlock success.\n")
		} else {
			fmt.Printf("unlock failed: %+v\n", err)
		}
	}()

	counterValue, err := client.Get(counterKey).Int64()
	if err != nil && err != redis.Nil {
		// A real Redis error (not just a missing key): leave the counter alone.
		fmt.Printf("Get counter value error: %+v\n", err)
		return
	}
	// redis.Nil means the key does not exist yet; counterValue is 0 then.
	counterValue++
	if _, err := client.Set(counterKey, counterValue, 0).Result(); err != nil {
		fmt.Printf("Set counter value error: %+v\n", err)
		return
	}
	fmt.Printf("Current counter value is: %+v\n", counterValue)
}
// CounterWithRedisLock launches ten concurrent Incr calls (each
// competing for the Redis lock) and blocks until all of them return.
func CounterWithRedisLock() {
	const workers = 10
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			Incr()
		}()
	}
	wg.Wait()
}
测试
package redis_go
import "testing"
func TestCounterWithRedisLock(t *testing.T) {
CounterWithRedisLock()
}
// === RUN TestCounterWithRedisLock
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Lock failed: false, <nil>
// Current counter value is: 1
// Unlock success.
// --- PASS: TestCounterWithRedisLock (0.01s)
// PASS
etcd
代码
package etcd_go
import (
"fmt"
"github.com/zieckey/etcdsync"
)
// LockWithETCD demonstrates a distributed lock backed by etcd using the
// etcdsync package: create the mutex, lock it, then unlock it.
//
// BUG FIX vs. the original: "etcd unlock success." was printed even
// when Unlock returned an error (missing return), and both success
// messages lacked a trailing newline so they ran into the next output.
func LockWithETCD() {
	// "/lock" is the etcd key for the mutex; 10 is presumably the lock
	// TTL in seconds — confirm against the etcdsync documentation.
	m, err := etcdsync.New("/lock", 10, []string{"http://127.0.0.1:2379"})
	if m == nil || err != nil {
		fmt.Printf("etcd sync new failed: %+v\n", err)
		return
	}
	if err := m.Lock(); err != nil {
		fmt.Printf("etcd sync lock failed: %+v\n", err)
		return
	}
	fmt.Printf("etcd lock success.\n")
	if err := m.Unlock(); err != nil {
		fmt.Printf("etcd unlock failed: %+v\n", err)
		return
	}
	fmt.Printf("etcd unlock success.\n")
}
选型比较
业务量不大时,使用单机锁方案就可以;
发展到分布式服务阶段,QPS很小的情况下,锁的方案都差不多;尽量使用已有的ZooKeeper、ETCD或者Redis集群;
业务发展到一定量级,如果允许恶劣条件下数据丢失,可以使用Redis的setnx的简单锁; 如果数据可靠性要求高,只能使用ETCD或者ZooKeeper(通过一致性协议保证数据可靠性);
refs
https://books.studygolang.com/advanced-go-programming-book/ch6-cloud/ch6-01-lock.html