This analysis is based on Kubernetes commit d577db99873cbf04b8e17b78f17ec8f3a27eca30 (Fri Apr 10 23:45:36 2015 -0700). kubectl's built-in help describes the rolling-update command as follows:
Perform a rolling update of the given ReplicationController.
Replaces the specified controller with new controller, updating one pod at a time to use the
new PodTemplate. The new-controller.json must specify the same namespace as the
existing controller and overwrite at least one (common) label in its replicaSelector.
kubectl rolling-update OLD_CONTROLLER_NAME -f NEW_CONTROLLER_SPEC
// Update pods of frontend-v1 using new controller data in frontend-v2.json.
$ kubectl rolling-update frontend-v1 -f frontend-v2.json
// Update pods of frontend-v1 using JSON data passed into stdin.
$ cat frontend-v2.json | kubectl rolling-update frontend-v1 -f -
A ReplicationController, rc for short, is the Kubernetes object that represents a set of pods of one kind; its key parameter is replicas, which is simply the number of pods.
So what is an rc for? If some pods in the cluster die, their containers need to be started again on other hosts and traffic has to be routed to the pods that actually came up. In other words, the rc keeps a service in the cluster available: when you run many services in one cluster, something has to monitor them and keep them running, and the rc is that something.
How do rc and pod relate? An rc uses a selector to choose the pods it controls: any pod whose labels match the selector belongs to that rc. Below is an example of a pod and an rc.
xx-controller.json
"spec": {
    "replicas": 1,
    "selector": {
        "name": "redis",
        "role": "master"
    },
    ...
}
xx-pod.json
"labels": {
    "name": "redis"
},
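To make the matching rule concrete, here is a small self-contained Go sketch (illustrative only, not Kubernetes code; selectorMatches is a made-up helper): a pod belongs to an rc when every key/value pair in the rc's selector also appears in the pod's labels, and extra labels on the pod are allowed.

package main

import "fmt"

// selectorMatches reports whether a pod's labels satisfy an rc's selector:
// every key/value pair in the selector must be present in the labels.
func selectorMatches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"name": "redis", "role": "master"}
	fmt.Println(selectorMatches(selector,
		map[string]string{"name": "redis", "role": "master", "env": "prod"})) // true
	fmt.Println(selectorMatches(selector,
		map[string]string{"name": "redis"})) // false: "role" is missing
}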
Kubernetes is usually abbreviated to k8s; its basic concepts are covered in a separate introductory post.
The kubectl entry point is /cmd/kubectl/kubectl.go:
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
if err := cmd.Execute(); err != nil {
os.Exit(1)
}
}
The rest of the source lives under the pkg package. In /pkg/kubectl/cmd/cmd.go every subcommand implements the same uniform interface; the line that registers rollingupdate is:
cmds.AddCommand(NewCmdRollingUpdate(f, out))
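For readers who have not used cobra (the CLI library kubectl is built on), the registration pattern looks roughly like the following self-contained sketch; the command names here are invented for illustration and are not kubectl code.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Root command, playing the role of kubectl itself.
	root := &cobra.Command{Use: "mycli"}

	// A subcommand, analogous to what NewCmdRollingUpdate returns.
	hello := &cobra.Command{
		Use:   "hello NAME",
		Short: "Print a greeting.",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintln(cmd.OutOrStdout(), "hello:", args)
		},
	}

	// The equivalent of cmds.AddCommand(NewCmdRollingUpdate(f, out)).
	root.AddCommand(hello)

	// The equivalent of cmd.Execute() in kubectl's main().
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}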
NewCmdRollingUpdate itself is implemented in /pkg/kubectl/cmd/rollingupdate.go:
func NewCmdRollingUpdate(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "rolling-update OLD_CONTROLLER_NAME -f NEW_CONTROLLER_SPEC",
// rollingupdate is deprecated.
Aliases: []string{"rollingupdate"},
Short: "Perform a rolling update of the given ReplicationController.",
Long: rollingUpdate_long,
Example: rollingUpdate_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunRollingUpdate(f, out, cmd, args)
cmdutil.CheckErr(err)
},
}
// ... flag definitions elided in this excerpt ...
return cmd
}
You can see that the function actually executed is RunRollingUpdate, which finally brings us to the main subject.
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
...
mapper, typer := f.Object()
// TODO: use resource.Builder instead
obj, err := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
NamespaceParam(cmdNamespace).RequireNamespace().
FilenameParam(filename).
Do().
Object()
if err != nil {
return err
}
newRc, ok := obj.(*api.ReplicationController)
if !ok {
return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
}
This is the code that builds the new rc. Here resource is the common base for all Kubernetes resources (pod, service, rc); the new rc takes everything from the JSON file supplied on the command line and is decoded into the ReplicationController type.
if oldName == newName {
return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
filename, oldName)
}
var hasLabel bool
for key, oldValue := range oldRc.Spec.Selector {
if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
hasLabel = true
break
}
}
if !hasLabel {
return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
filename, oldName)
}
Here you can see two constraints placed on the new rc versus the old one: the two names must be different, and at least one key in the selector must have a different value.
updater := kubectl.NewRollingUpdater(newRc.Namespace, client)
// fetch rc
oldRc, err := client.ReplicationControllers(newRc.Namespace).Get(oldName)
if err != nil {
return err
}
...
err = updater.Update(out, oldRc, newRc, period, interval, timeout)
if err != nil {
return err
}
So a rolling update has two preconditions: the new rc's name must differ from the old one's, and at least one label value must differ. The namespace, which k8s uses for multi-tenant resource isolation, can be ignored for now.
This code introduces NewRollingUpdater, which lives one level up in /pkg/kubectl/rollingupdate.go; now we are getting close to the core.
// RollingUpdater provides methods for updating replicated pods in a predictable,
// fault-tolerant way.
type RollingUpdater struct {
// Client interface for creating and updating controllers
c client.Interface
// Namespace for resources
ns string
}
You can see that a RollingUpdater holds a k8s client, which it uses to send requests to the api server:
func (r *RollingUpdater) Update(out io.Writer, oldRc, newRc *api.ReplicationController, updatePeriod, interval, timeout time.Duration) error {
oldName := oldRc.ObjectMeta.Name
newName := newRc.ObjectMeta.Name
retry := &RetryParams{interval, timeout}
waitForReplicas := &RetryParams{interval, timeout}
if newRc.Spec.Replicas <= 0 {
return fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %s\n", newName, newRc.Spec)
}
desired := newRc.Spec.Replicas
sourceId := fmt.Sprintf("%s:%s", oldName, oldRc.ObjectMeta.UID)
// look for existing newRc, incase this update was previously started but interrupted
rc, existing, err := r.getExistingNewRc(sourceId, newName)
if existing {
fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newName)
if err != nil {
return err
}
replicas := rc.ObjectMeta.Annotations[desiredReplicasAnnotation]
desired, err = strconv.Atoi(replicas)
if err != nil {
return fmt.Errorf("Unable to parse annotation for %s: %s=%s",
newName, desiredReplicasAnnotation, replicas)
}
newRc = rc
} else {
fmt.Fprintf(out, "Creating %s\n", newName)
if newRc.ObjectMeta.Annotations == nil {
newRc.ObjectMeta.Annotations = map[string]string{}
}
newRc.ObjectMeta.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", desired)
newRc.ObjectMeta.Annotations[sourceIdAnnotation] = sourceId
newRc.Spec.Replicas = 0
newRc, err = r.c.ReplicationControllers(r.ns).Create(newRc)
if err != nil {
return err
}
}
// +1, -1 on oldRc, newRc until newRc has desired number of replicas or oldRc has 0 replicas
for newRc.Spec.Replicas < desired && oldRc.Spec.Replicas != 0 {
newRc.Spec.Replicas += 1
oldRc.Spec.Replicas -= 1
fmt.Printf("At beginning of loop: %s replicas: %d, %s replicas: %d\n",
oldName, oldRc.Spec.Replicas,
newName, newRc.Spec.Replicas)
fmt.Fprintf(out, "Updating %s replicas: %d, %s replicas: %d\n",
oldName, oldRc.Spec.Replicas,
newName, newRc.Spec.Replicas)
newRc, err = r.resizeAndWait(newRc, retry, waitForReplicas)
if err != nil {
return err
}
time.Sleep(updatePeriod)
oldRc, err = r.resizeAndWait(oldRc, retry, waitForReplicas)
if err != nil {
return err
}
fmt.Printf("At end of loop: %s replicas: %d, %s replicas: %d\n",
oldName, oldRc.Spec.Replicas,
newName, newRc.Spec.Replicas)
}
// delete remaining replicas on oldRc
if oldRc.Spec.Replicas != 0 {
fmt.Fprintf(out, "Stopping %s replicas: %d -> %d\n",
oldName, oldRc.Spec.Replicas, 0)
oldRc.Spec.Replicas = 0
oldRc, err = r.resizeAndWait(oldRc, retry, waitForReplicas)
// oldRc, err = r.resizeAndWait(oldRc, interval, timeout)
if err != nil {
return err
}
}
// add remaining replicas on newRc
if newRc.Spec.Replicas != desired {
fmt.Fprintf(out, "Resizing %s replicas: %d -> %d\n",
newName, newRc.Spec.Replicas, desired)
newRc.Spec.Replicas = desired
newRc, err = r.resizeAndWait(newRc, retry, waitForReplicas)
if err != nil {
return err
}
}
// Clean up annotations
if newRc, err = r.c.ReplicationControllers(r.ns).Get(newName); err != nil {
return err
}
delete(newRc.ObjectMeta.Annotations, sourceIdAnnotation)
delete(newRc.ObjectMeta.Annotations, desiredReplicasAnnotation)
newRc, err = r.updateAndWait(newRc, interval, timeout)
if err != nil {
return err
}
// delete old rc
fmt.Fprintf(out, "Update succeeded. Deleting %s\n", oldName)
return r.c.ReplicationControllers(r.ns).Delete(oldName)
}
This code is long, but what it does is simple:
1. Look for an existing new rc carrying the matching source annotation (in case a previous update was interrupted) and continue with it; otherwise create the new rc with 0 replicas and record the desired replica count in an annotation.
2. In a loop, add one replica to the new rc and remove one from the old rc, waiting for each resize to take effect, until the new rc reaches the desired count or the old rc hits zero.
3. Scale whatever remains: the old rc down to 0, the new rc up to the desired count.
4. Clean up the bookkeeping annotations on the new rc and delete the old rc.
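To make the one-pod-at-a-time loop concrete, here is a tiny stand-alone simulation (not kubectl code) of how the replica counts move when the old rc starts with 3 replicas and the new rc targets 3:

package main

import "fmt"

func main() {
	oldReplicas, newReplicas, desired := 3, 0, 3

	// Mirrors the loop in RollingUpdater.Update: grow the new rc by one,
	// shrink the old rc by one, until the new rc reaches the desired count
	// or the old rc runs out of pods.
	for newReplicas < desired && oldReplicas != 0 {
		newReplicas++
		oldReplicas--
		fmt.Printf("old=%d new=%d\n", oldReplicas, newReplicas)
	}
	// Prints: old=2 new=1, old=1 new=2, old=0 new=3
}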
Continuing from resizeAndWait in the section above: it is implemented in /pkg/kubectl/resize.go, and I will not paste that code here.
All the remaining calls happen under /pkg/client, an http/json client whose main job is to send requests to the api-server.
Overall, the wait implementations above are fairly crude: send an update request, then keep polling with get requests until the observed state matches the desired final state. A rough sketch of this pattern follows the parameter list below.
First, the roles of the three time parameters:
update-period: after the new rc gains a pod, wait this long before removing a pod from the old rc.
poll-interval: named after the poll() call on Linux; every poll-interval a request is sent to the server, until the request succeeds or fails for good.
timeout: the overall deadline for the whole operation.
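A minimal sketch of that poll-until-done pattern, using poll-interval and timeout (getReplicas is a made-up callback standing in for a GET to the api-server; this is not the actual /pkg/client code):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForReplicas polls getReplicas every interval until it reports the
// desired count or the timeout expires.
func waitForReplicas(desired int, interval, timeout time.Duration,
	getReplicas func() (int, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		n, err := getReplicas()
		if err == nil && n == desired {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for replicas")
		}
		time.Sleep(interval)
	}
}

func main() {
	current := 0
	err := waitForReplicas(3, 10*time.Millisecond, time.Second, func() (int, error) {
		current++ // pretend the cluster converges by one replica per poll
		return current, nil
	})
	fmt.Println("done, err =", err)
}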
rolling update is thus implemented almost entirely on the client side. That concludes the walkthrough, although a few open questions remain.
Original post: http://my.oschina.net/HardySimpson/blog/403908