diff --git a/build/docker/Dockerfile.baremetal-agent b/build/docker/Dockerfile.baremetal-agent index 8ea045523d8..d445c9e75ad 100644 --- a/build/docker/Dockerfile.baremetal-agent +++ b/build/docker/Dockerfile.baremetal-agent @@ -1,4 +1,4 @@ -FROM registry.cn-beijing.aliyuncs.com/yunionio/baremetal-base:v0.3.9-20231219.0 +FROM registry.cn-beijing.aliyuncs.com/yunionio/baremetal-base:v0.3.9-20251120.1 MAINTAINER "Zexi Li " diff --git a/build/docker/Dockerfile.baremetal-base b/build/docker/Dockerfile.baremetal-base index 3f9213b52f0..0e0507e2a51 100644 --- a/build/docker/Dockerfile.baremetal-base +++ b/build/docker/Dockerfile.baremetal-base @@ -1,7 +1,7 @@ #FROM --platform=linux/amd64 registry.cn-beijing.aliyuncs.com/yunionio/centos-build:1.1-4 as build #RUN yum install -y https://iso.yunion.cn/vm-images/baremetal-pxerom-1.1.0-21092209.x86_64.rpm #RUN yum install -y http://192.168.23.50:8083/baremetal-pxerom-1.1.0-21092209.x86_64.rpm -FROM registry.cn-beijing.aliyuncs.com/yunionio/yunionos:v3.10.8-20231215.0 as yunionos +FROM registry.cn-beijing.aliyuncs.com/yunionio/yunionos:v4.0.0-20251118.0 as yunionos FROM centos:8 as grub-stage diff --git a/build/docker/Makefile b/build/docker/Makefile index df680a4d5fd..29b2f87d4cf 100644 --- a/build/docker/Makefile +++ b/build/docker/Makefile @@ -30,7 +30,7 @@ WEBCONSOLE_BASE_VERSION = 20230731.2 webconsole-base: $(DOCKER_BUILDX)/webconsole-base:$(WEBCONSOLE_BASE_VERSION) -f ./Dockerfile.webconsole-base . -BAREMETAL_BASE_VERSION = v0.3.9-20231219.1 +BAREMETAL_BASE_VERSION = v0.3.9-20251120.1 baremetal-base: $(DOCKER_BUILDX)/baremetal-base:$(BAREMETAL_BASE_VERSION) -f ./Dockerfile.baremetal-base . 
diff --git a/pkg/apis/compute/api.go b/pkg/apis/compute/api.go index 777ca6c4ba9..67fbe06aa3d 100644 --- a/pkg/apis/compute/api.go +++ b/pkg/apis/compute/api.go @@ -312,6 +312,7 @@ type BaremetalDiskConfig struct { RA *bool `json:"ra,omitempty"` WT *bool `json:"wt,omitempty"` Direct *bool `json:"direct,omitempty"` + SoftRaidIdx *int `json:"soft_raid_idx"` } type RootDiskMatcherSizeMBRange struct { diff --git a/pkg/apis/compute/baremetal_const.go b/pkg/apis/compute/baremetal_const.go index 65f091da9aa..a8577a0ca75 100644 --- a/pkg/apis/compute/baremetal_const.go +++ b/pkg/apis/compute/baremetal_const.go @@ -64,6 +64,11 @@ var ( DISK_DRIVER_ADAPTECRAID, ) + DISK_DRIVERS_SOFT_RAID = sets.NewString( + DISK_DRIVER_LINUX, + DISK_DRIVER_PCIE, + ) + DISK_DRIVERS = sets.NewString( DISK_DRIVER_LINUX, DISK_DRIVER_PCIE).Union(DISK_DRIVERS_RAID) diff --git a/pkg/baremetal/manager.go b/pkg/baremetal/manager.go index 14e880d3303..ffb713a06d9 100644 --- a/pkg/baremetal/manager.go +++ b/pkg/baremetal/manager.go @@ -56,7 +56,9 @@ import ( "yunion.io/x/onecloud/pkg/baremetal/utils/disktool" "yunion.io/x/onecloud/pkg/baremetal/utils/grub" "yunion.io/x/onecloud/pkg/baremetal/utils/ipmitool" + raid2 "yunion.io/x/onecloud/pkg/baremetal/utils/raid" raiddrivers "yunion.io/x/onecloud/pkg/baremetal/utils/raid/drivers" + "yunion.io/x/onecloud/pkg/baremetal/utils/raid/mdadm" "yunion.io/x/onecloud/pkg/baremetal/utils/uefi" "yunion.io/x/onecloud/pkg/cloudcommon/types" "yunion.io/x/onecloud/pkg/compute/baremetal" @@ -2671,15 +2673,32 @@ func (s *SBaremetalServer) NewConfigedSSHPartitionTool(term *ssh.Client) (*diskt return nil, fmt.Errorf("CalculateLayout: %v", err) } + log.Errorf("NewConfigedSSHPartitionTool layouts: %s", jsonutils.Marshal(layouts)) diskConfs := baremetal.GroupLayoutResultsByDriverAdapter(layouts) for _, dConf := range diskConfs { driver := dConf.Driver adapter := dConf.Adapter + isSoftRaid := baremetal.DISK_DRIVERS_SOFT_RAID.Has(driver) + raidDrv := 
raiddrivers.GetDriver(driver, term) if raidDrv != nil { if err := raidDrv.ParsePhyDevs(); err != nil { return nil, fmt.Errorf("RaidDriver %s parse physical devices: %v", raidDrv.GetName(), err) } + if isSoftRaid { + devs := make([]*baremetal.BaremetalStorage, 0) + for _, layout := range layouts { + if len(layout.Disks) > 0 && layout.Disks[0].Driver == driver && layout.Disks[0].Adapter == dConf.Adapter { + devs = append(devs, layout.Disks...) + } + } + + log.Infof("SetDevicesForAdapter %v", jsonutils.Marshal(devs)) + if mdadmDrver, ok := raidDrv.(raid2.IRaidDeviceSetter); ok { + mdadmDrver.SetDevicesForAdapter(dConf.Adapter, devs) + } + } + if err := raiddrivers.PostBuildRaid(raidDrv, adapter); err != nil { return nil, fmt.Errorf("Build %s raid failed: %v", raidDrv.GetName(), err) } @@ -2731,11 +2750,27 @@ func (s *SBaremetalServer) DoDiskConfig(term *ssh.Client) (*disktool.SSHPartitio for _, dConf := range diskConfs { driver := dConf.Driver adapter := dConf.Adapter + isSoftRaid := baremetal.DISK_DRIVERS_SOFT_RAID.Has(driver) + raidDrv := raiddrivers.GetDriver(driver, term) if raidDrv != nil { if err := raidDrv.ParsePhyDevs(); err != nil { return nil, fmt.Errorf("RaidDriver %s parse physical devices: %v", raidDrv.GetName(), err) } + if isSoftRaid { + devs := make([]*baremetal.BaremetalStorage, 0) + for _, layout := range layouts { + if len(layout.Disks) > 0 && layout.Disks[0].Driver == driver && layout.Disks[0].Adapter == dConf.Adapter { + devs = append(devs, layout.Disks...) 
+ } + } + + log.Infof("SetDevicesForAdapter %v", jsonutils.Marshal(devs)) + if mdadmDriver, ok := raidDrv.(raid2.IRaidDeviceSetter); ok { + mdadmDriver.SetDevicesForAdapter(dConf.Adapter, devs) + } + } + if err := raiddrivers.BuildRaid(raidDrv, dConf.Configs, adapter); err != nil { return nil, fmt.Errorf("Build %s raid failed: %v", raidDrv.GetName(), err) } @@ -2779,6 +2814,10 @@ func (s *SBaremetalServer) DoDiskUnconfig(term *ssh.Client) error { } func (s *SBaremetalServer) DoEraseDisk(term *ssh.Client) error { + // soft raid should stop mdadm first + if err := mdadm.CleanRaid(term); err != nil { + return err + } cmd := "/lib/mos/partdestroy.sh" _, err := term.Run(cmd) return err @@ -2839,6 +2878,7 @@ func (s *SBaremetalServer) DoPartitionDisk(tool *disktool.SSHPartitionTool, term rootImageId := s.GetRootTemplateId() diskOffset := 0 rootDisk := tool.GetRootDisk() + log.Infof("root disk name %s", rootDisk.GetDevName()) if len(rootImageId) > 0 { rootDiskObj := disks[0] rootSize, _ := rootDiskObj.Int("size") diff --git a/pkg/baremetal/tasks/create.go b/pkg/baremetal/tasks/create.go index b6790d1b62d..5861dd4a6bc 100644 --- a/pkg/baremetal/tasks/create.go +++ b/pkg/baremetal/tasks/create.go @@ -54,13 +54,13 @@ func (self *SBaremetalServerCreateTask) RemoveEFIOSEntry() bool { } func (self *SBaremetalServerCreateTask) DoDeploys(ctx context.Context, term *ssh.Client) (jsonutils.JSONObject, error) { - // Build raid - tool, err := self.Baremetal.GetServer().DoDiskConfig(term) - if err != nil { + if err := self.Baremetal.GetServer().DoEraseDisk(term); err != nil { return nil, self.onError(ctx, term, err) } time.Sleep(2 * time.Second) - if err := self.Baremetal.GetServer().DoEraseDisk(term); err != nil { + // Build raid + tool, err := self.Baremetal.GetServer().DoDiskConfig(term) + if err != nil { return nil, self.onError(ctx, term, err) } time.Sleep(2 * time.Second) diff --git a/pkg/baremetal/utils/disktool/disktool.go b/pkg/baremetal/utils/disktool/disktool.go index 
3cfa29a10c0..242be6f089c 100644 --- a/pkg/baremetal/utils/disktool/disktool.go +++ b/pkg/baremetal/utils/disktool/disktool.go @@ -17,6 +17,7 @@ package disktool import ( "fmt" "math" + "strconv" "strings" "yunion.io/x/jsonutils" @@ -229,7 +230,7 @@ type DiskPartitions struct { partitions []*Partition } -func newDiskPartitions(driver string, adapter int, raidConfig string, sizeMB int64, blockSize int64, diskType string, tool *PartitionTool) *DiskPartitions { +func newDiskPartitions(driver string, adapter int, raidConfig string, sizeMB int64, blockSize int64, diskType string, softRaidIdx *int, tool *PartitionTool) *DiskPartitions { ps := new(DiskPartitions) ps.driver = driver ps.adapter = adapter @@ -239,9 +240,32 @@ func newDiskPartitions(driver string, adapter int, raidConfig string, sizeMB int ps.blockSize = blockSize ps.diskType = diskType ps.partitions = make([]*Partition, 0) + + // soft raid, mdadm + if softRaidIdx != nil { + ps.GetMdadmInfo(softRaidIdx) + } return ps } +func (ps *DiskPartitions) GetMdadmInfo(softRaidIdx *int) { + devLinkName := fmt.Sprintf("/dev/md/md%d", *softRaidIdx) + devLinkNickname := fmt.Sprintf("/dev/md/md%d_0", *softRaidIdx) + cmd := fmt.Sprintf("readlink -f $(test -e %s && echo %s || echo %s)", devLinkName, devLinkName, devLinkNickname) + out, err := ps.tool.Run(cmd) + if err != nil || len(out) == 0 { + log.Errorf("failed readlink of %s: %s", devLinkName, err) + return + } + + ps.dev = strings.TrimSpace(out[0]) + ps.devName = ps.dev + uuid, sectors := ps.tool.GetMdadmUuidAndSector(ps.dev) + ps.pciPath = uuid + ps.sectors = sectors + ps.blockSize = 512 +} + func (p *DiskPartitions) IsRaidDriver() bool { return utils.IsInStringArray(p.driver, []string{ baremetal.DISK_DRIVER_MEGARAID, @@ -324,7 +348,7 @@ func (ps *DiskPartitions) IsReady() bool { func (ps *DiskPartitions) GetDevName() string { devName := ps.devName - if !ps.IsRaidDriver() || ps.raidConfig == baremetal.DISK_CONF_NONE { + if ps.raidConfig == baremetal.DISK_CONF_NONE { 
return devName } raidDrv, err := raiddrivers.GetDriverWithInit(ps.driver, ps.tool.runner.Term()) @@ -690,12 +714,13 @@ func (tool *PartitionTool) parseLsDisk(lines []string, driver string) { func (tool *PartitionTool) FetchDiskConfs(diskConfs []baremetal.DiskConfiguration) *PartitionTool { for _, d := range diskConfs { - disk := newDiskPartitions(d.Driver, d.Adapter, d.RaidConfig, d.Size, d.Block, d.DiskType, tool) + disk := newDiskPartitions(d.Driver, d.Adapter, d.RaidConfig, d.Size, d.Block, d.DiskType, d.SoftRaidIdx, tool) tool.disks = append(tool.disks, disk) + isSoftRaid := d.RaidConfig != baremetal.DISK_CONF_NONE var key string - if d.Driver == baremetal.DISK_DRIVER_LINUX { + if d.Driver == baremetal.DISK_DRIVER_LINUX && !isSoftRaid { key = NONRAID_DRIVER - } else if d.Driver == baremetal.DISK_DRIVER_PCIE { + } else if d.Driver == baremetal.DISK_DRIVER_PCIE && !isSoftRaid { key = PCIE_DRIVER } else { key = RAID_DRVIER @@ -768,7 +793,42 @@ func (tool *PartitionTool) IsAllDisksReady() bool { return true } +func (tool *PartitionTool) GetMdadmUuidAndSector(devPath string) (string, int64) { + var uuid string + var sectorsRet int64 + // get md uuid as pci path + cmd := fmt.Sprintf("/sbin/mdadm --detail %s | grep UUID", devPath) + output, err := tool.Run(cmd) + if err == nil && len(output) > 0 { + uuidSeg := output[0] + segs := strings.SplitN(strings.TrimSpace(uuidSeg), ":", 2) + if len(segs) == 2 { + uuid = strings.TrimSpace(segs[1]) + } + } + + // get block size + cmd = fmt.Sprintf("blockdev --getsz %s 2>/dev/null || echo 0", devPath) + output, err = tool.Run(cmd) + if err == nil && len(output) > 0 { + if sectors, err := strconv.ParseInt(strings.TrimSpace(output[0]), 10, 64); err == nil { + sectorsRet = sectors + } + } + return uuid, sectorsRet +} + func (tool *PartitionTool) RetrieveDiskInfo(rootMatcher *api.BaremetalRootDiskMatcher) error { + for _, disk := range tool.disks { + if baremetal.DISK_DRIVERS_SOFT_RAID.Has(disk.driver) && disk.raidConfig != 
baremetal.DISK_CONF_NONE { + log.Infof("Soft raid mdadm set diskinfo dev %s", disk.dev) + uuid, sectors := tool.GetMdadmUuidAndSector(disk.dev) + disk.pciPath = uuid + disk.sectors = sectors + disk.blockSize = 512 + } + } + for _, driver := range []string{RAID_DRVIER, NONRAID_DRIVER, PCIE_DRIVER} { cmd := fmt.Sprintf("/lib/mos/lsdisk --%s", driver) ret, err := tool.Run(cmd) diff --git a/pkg/baremetal/utils/raid/drivers/drivers.go b/pkg/baremetal/utils/raid/drivers/drivers.go index 4237aa04f8b..a2f19c69824 100644 --- a/pkg/baremetal/utils/raid/drivers/drivers.go +++ b/pkg/baremetal/utils/raid/drivers/drivers.go @@ -25,6 +25,7 @@ import ( "yunion.io/x/onecloud/pkg/baremetal/utils/raid" _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/adaptec" _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/hpssactl" + _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/mdadm" _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/megactl" _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/mvcli" _ "yunion.io/x/onecloud/pkg/baremetal/utils/raid/sas2iru" diff --git a/pkg/baremetal/utils/raid/interface.go b/pkg/baremetal/utils/raid/interface.go index 0b9e0b9a25a..17e875a712a 100644 --- a/pkg/baremetal/utils/raid/interface.go +++ b/pkg/baremetal/utils/raid/interface.go @@ -45,6 +45,10 @@ type IRaidAdapter interface { PostBuildRaid() error } +type IRaidDeviceSetter interface { + SetDevicesForAdapter(int, []*baremetal.BaremetalStorage) +} + type IExecTerm interface { Run(cmds ...string) ([]string, error) RunWithInput(input io.Reader, cmds ...string) ([]string, error) diff --git a/pkg/baremetal/utils/raid/mdadm/doc.go b/pkg/baremetal/utils/raid/mdadm/doc.go new file mode 100644 index 00000000000..4a9d3ebe8de --- /dev/null +++ b/pkg/baremetal/utils/raid/mdadm/doc.go @@ -0,0 +1 @@ +package mdadm // import "yunion.io/x/onecloud/pkg/baremetal/utils/raid/mdadm" diff --git a/pkg/baremetal/utils/raid/mdadm/mdadm.go b/pkg/baremetal/utils/raid/mdadm/mdadm.go new file mode 100644 index 00000000000..6112f818280 
--- /dev/null +++ b/pkg/baremetal/utils/raid/mdadm/mdadm.go @@ -0,0 +1,434 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mdadm + +import ( + "fmt" + "path" + "regexp" + "strconv" + "strings" + "time" + + "yunion.io/x/log" + "yunion.io/x/pkg/errors" + + api "yunion.io/x/onecloud/pkg/apis/compute" + "yunion.io/x/onecloud/pkg/baremetal/utils/raid" + "yunion.io/x/onecloud/pkg/compute/baremetal" + "yunion.io/x/onecloud/pkg/util/ssh" + "yunion.io/x/onecloud/pkg/util/sysutils" +) + +const ( + MDADM_BIN = "/sbin/mdadm" +) + +func init() { + raid.RegisterDriver(baremetal.DISK_DRIVER_LINUX, NewMdadmRaidLinux) + raid.RegisterDriver(baremetal.DISK_DRIVER_PCIE, NewMdadmRaidPcie) +} + +type MdadmRaid struct { + term raid.IExecTerm + adapter *MdadmRaidAdapter + driverName string +} + +func NewMdadmRaidLinux(term raid.IExecTerm) raid.IRaidDriver { + return &MdadmRaid{ + term: term, + driverName: baremetal.DISK_DRIVER_LINUX, + } +} + +func NewMdadmRaidPcie(term raid.IExecTerm) raid.IRaidDriver { + return &MdadmRaid{ + term: term, + driverName: baremetal.DISK_DRIVER_PCIE, + } +} + +func (r *MdadmRaid) GetName() string { + return r.driverName +} + +func (r *MdadmRaid) ParsePhyDevs() error { + if r.adapter == nil { + r.adapter = &MdadmRaidAdapter{ + raid: r, + term: r.term, + index: 0, + } + } + return nil +} + +func (r *MdadmRaid) SetDevicesForAdapter(adapterIdx int, devs []*baremetal.BaremetalStorage) { + r.adapter.setDevices(devs) + 
for i := range devs { + devPath := path.Join("/dev", devs[i].Dev) + cmd := fmt.Sprintf("%s --examine %s | grep UUID", MDADM_BIN, devPath) + output, err := r.term.Run(cmd) + if err == nil && len(output) > 0 { + for _, line := range output { + segs := strings.SplitN(strings.TrimSpace(line), ":", 2) + if len(segs) == 2 { + uuid := strings.TrimSpace(segs[1]) + cmd = fmt.Sprintf("%s --assemble --scan --uuid=%s", MDADM_BIN, uuid) + output, err := r.term.Run(cmd) + if err != nil { + log.Errorf("failed assemble mdadm %s: %s", uuid, output) + } + } + } + } + } +} + +func (r *MdadmRaid) GetAdapters() []raid.IRaidAdapter { + return []raid.IRaidAdapter{r.adapter} +} + +func (r *MdadmRaid) PreBuildRaid(confs []*api.BaremetalDiskConfig, adapterIdx int) error { + return nil +} + +func deviceHasRaid(devPath string, term *ssh.Client) bool { + cmd := fmt.Sprintf("%s --examine %s 2>/dev/null || true", MDADM_BIN, devPath) + output, err := term.Run(cmd) + if err != nil { + log.Errorf("examine device %s: %s", devPath, err) + return false + } + + for _, line := range output { + if strings.Contains(line, "mdadm") || strings.Contains(line, "ARRAY") { + return true + } + } + return false +} + +func (r *MdadmRaid) CleanRaid() error { + return nil +} + +func CleanMdadmPartitions(term *ssh.Client) { + out, err := term.Run("ls -1 /dev/md/") + if err != nil { + log.Errorf("failed get md devices %s, %s", out, err) + return + } + // destroy mdadm soft raid + for _, line := range out { + dev := strings.TrimSpace(line) + if !strings.HasPrefix(dev, "md") { + continue + } + out, err = term.Run(fmt.Sprintf("dd if=/dev/zero of=/dev/md/%s bs=512 count=34", dev)) + if err != nil { + log.Errorf("failed clean mdadm partitions %s %s", out, err) + } + out, err = term.Run(fmt.Sprintf("dd if=/dev/zero of=/dev/md/%s bs=512 count=34 seek=$(( $(blockdev --getsz /dev/md/%s) - 34 ))", dev, dev)) + if err != nil { + log.Errorf("failed clean mdadm partitions %s %s", out, err) + } + out, err = 
term.Run(fmt.Sprintf("hdparm -z /dev/md/%s", dev)) + if err != nil { + log.Errorf("failed clean mdadm partitions %s %s", out, err) + } + } +} + +func CleanRaid(term *ssh.Client) error { + CleanMdadmPartitions(term) + + // stop md devices + cmd := fmt.Sprintf("%s --stop --scan", MDADM_BIN) + _, err := term.Run(cmd) + if err != nil { + log.Warningf("Stop md devices: %s", err) + } + + pcieRet, err := term.Run("/lib/mos/lsdisk --pcie") + if err != nil { + log.Warningf("Fail to retrieve PCIE DISK info %s", err) + } else { + pcieDiskInfo := sysutils.ParsePCIEDiskInfo(pcieRet) + for i := range pcieDiskInfo { + devPath := path.Join("/dev", pcieDiskInfo[i].Dev) + if deviceHasRaid(devPath, term) { + cmd := fmt.Sprintf("%s --zero-superblock --force %s", MDADM_BIN, devPath) + out, err := term.Run(cmd) + if err != nil { + return errors.Wrapf(err, "zero superblock on %s: %s", devPath, out) + } + } + } + } + + nonraidRet, err := term.Run("/lib/mos/lsdisk --nonraid") + if err != nil { + log.Warningf("Fail to retrieve SCSI DISK info %s", err) + } else { + nonraidDiskInfo := sysutils.ParseSCSIDiskInfo(nonraidRet) + for i := range nonraidDiskInfo { + devPath := path.Join("/dev", nonraidDiskInfo[i].Dev) + if deviceHasRaid(devPath, term) { + cmd := fmt.Sprintf("%s --zero-superblock --force %s", MDADM_BIN, devPath) + out, err := term.Run(cmd) + if err != nil { + return errors.Wrapf(err, "zero superblock on %s: %s", devPath, out) + } + } + } + } + + out, err := term.Run("rm /dev/md/*") + if err != nil { + log.Warningf("failed soft link at /dev/md %s", out) + } + + return nil +} + +type MdadmRaidAdapter struct { + raid *MdadmRaid + term raid.IExecTerm + index int + devs []*baremetal.BaremetalStorage +} + +func (a *MdadmRaidAdapter) GetIndex() int { + return a.index +} + +func (a *MdadmRaidAdapter) PreBuildRaid(confs []*api.BaremetalDiskConfig) error { + return nil +} + +func (a *MdadmRaidAdapter) GetLogicVolumes() ([]*raid.RaidLogicalVolume, error) { + lvs := 
make([]*raid.RaidLogicalVolume, 0) + cmd := "ls -1 /dev/md/* 2>/dev/null || true" + output, err := a.term.Run(cmd) + if err != nil { + return lvs, nil + } + + for _, line := range output { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "/dev/md/md") { + mdPath := line + numStr := strings.TrimPrefix(line, "/dev/md/md") + if strings.HasSuffix(numStr, "_0") { + numStr = strings.TrimSuffix(numStr, "_0") + } + if num, err := strconv.Atoi(numStr); err == nil { + res, err := a.term.Run(fmt.Sprintf("readlink -f %s", line)) + if err == nil && len(res) > 0 { + mdPath = strings.TrimSpace(res[0]) + lv := &raid.RaidLogicalVolume{ + Index: num, + Adapter: a.index, + BlockDev: mdPath, + } + lvs = append(lvs, lv) + } + } + } + } + + return lvs, nil +} + +func (a *MdadmRaidAdapter) RemoveLogicVolumes() error { + //cmd := fmt.Sprintf("%s --stop --scan", MDADM_BIN) + //_, err := a.term.Run(cmd) + //if err != nil { + // log.Warningf("Stop md devices: %v", err) + //} + return nil +} + +func (a *MdadmRaidAdapter) GetDevices() []*baremetal.BaremetalStorage { + return a.devs +} + +func (a *MdadmRaidAdapter) setDevices(devs []*baremetal.BaremetalStorage) { + a.devs = devs +} + +func (a *MdadmRaidAdapter) BuildRaid0(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error { + return a.buildRaid("0", devs, conf) +} + +func (a *MdadmRaidAdapter) BuildRaid1(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error { + return a.buildRaid("1", devs, conf) +} + +func (a *MdadmRaidAdapter) BuildRaid5(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error { + return a.buildRaid("5", devs, conf) +} + +func (a *MdadmRaidAdapter) BuildRaid10(devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error { + return a.buildRaid("10", devs, conf) +} + +func (a *MdadmRaidAdapter) BuildNoneRaid(devs []*baremetal.BaremetalStorage) error { + return nil +} + +func (a *MdadmRaidAdapter) PostBuildRaid() error { + return nil +} + +func (a 
*MdadmRaidAdapter) buildRaid(level string, devs []*baremetal.BaremetalStorage, conf *api.BaremetalDiskConfig) error { + if len(devs) == 0 { + return fmt.Errorf("no devices provided for RAID %s", level) + } + + var mdNum int + var err error + if conf.SoftRaidIdx != nil { + mdNum = *conf.SoftRaidIdx + } else { + mdNum, err = a.getNextMdNum() + if err != nil { + return errors.Wrap(err, "get next md number") + } + } + + devPaths := make([]string, 0, len(devs)) + for _, dev := range devs { + if dev.Dev == "" { + return fmt.Errorf("device path is empty for storage") + } + devPaths = append(devPaths, path.Join("/dev", dev.Dev)) + } + + for _, dev := range devPaths { + if err := a.ensureDeviceClean(dev); err != nil { + return errors.Wrapf(err, "clean device %s", dev) + } + } + + mdDev := fmt.Sprintf("/dev/md/md%d", mdNum) + + imsmDev := fmt.Sprintf("/dev/md/imsm%d", mdNum) + cmdImsm := fmt.Sprintf("%s --create %s --metadata=imsm --raid-devices=%d --run --force %s", MDADM_BIN, imsmDev, len(devs), strings.Join(devPaths, " ")) + output, err := a.term.Run(cmdImsm) + if err != nil { + log.Errorf("mdadm create imsm raid %s failed, output: %v %s", level, output, err) + } else { + a.term.Run(fmt.Sprintf("%s --wait %s", MDADM_BIN, imsmDev)) + time.Sleep(time.Second * 3) + } + + args := []string{ + "--create", + mdDev, + fmt.Sprintf("--level=%s", level), + fmt.Sprintf("--raid-devices=%d", len(devs)), + "--force", + "--run", + } + + for _, dev := range devPaths { + args = append(args, dev) + } + + args = append(args, "--assume-clean") + + cmd := fmt.Sprintf("%s %s", MDADM_BIN, strings.Join(args, " ")) + log.Infof("Building software RAID %s: %s", level, cmd) + + output, err = a.term.Run(cmd) + if err != nil { + return errors.Wrapf(err, "mdadm create raid %s failed, output: %v", level, output) + } + + cmd = fmt.Sprintf("%s --wait %s", MDADM_BIN, mdDev) + output, err = a.term.Run(cmd) + if err != nil { + log.Errorf("mdadm wait raid %s failed: %s", mdDev, output) + //return 
errors.Wrapf(err, "mdadm wait raid %s failed, output: %v", mdDev, output) + } + + log.Infof("Successfully created software RAID %s: /dev/md/md%d, start sync block devs", level, mdNum) + + for i := range devPaths { + flushCmd := fmt.Sprintf("blockdev --flushbufs %s", devPaths[i]) + output, err = a.term.Run(flushCmd) + if err != nil { + return errors.Wrapf(err, "mdadm blockdev flushbufs %s failed, output: %v", devPaths[i], output) + } + } + + output, err = a.term.Run("sync") + if err != nil { + return errors.Wrapf(err, "mdadm %s sync failed, output: %v", mdDev, output) + } + + return nil +} + +func (a *MdadmRaidAdapter) getNextMdNum() (int, error) { + cmd := "ls -1 /dev/md/ 2>/dev/null | grep -E '^md[0-9]+$' || true" + output, err := a.term.Run(cmd) + if err != nil { + return 0, errors.Wrap(err, "list md devices") + } + + usedNums := make(map[int]bool) + mdNumRe := regexp.MustCompile(`^md(\d+)$`) + for _, line := range output { + matches := mdNumRe.FindStringSubmatch(strings.TrimSpace(line)) + if len(matches) > 1 { + if num, err := strconv.Atoi(matches[1]); err == nil { + usedNums[num] = true + } + } + } + + for i := 0; i < 256; i++ { + if !usedNums[i] { + return i, nil + } + } + + return 0, fmt.Errorf("no available md device number") +} + +func (a *MdadmRaidAdapter) ensureDeviceClean(dev string) error { + cmd := fmt.Sprintf("%s --examine %s 2>/dev/null || true", MDADM_BIN, dev) + output, err := a.term.Run(cmd) + if err != nil { + return errors.Wrapf(err, "examine device %s", dev) + } + + for _, line := range output { + if strings.Contains(line, "mdadm") || strings.Contains(line, "ARRAY") { + cmd := fmt.Sprintf("%s --zero-superblock --force %s", MDADM_BIN, dev) + _, err := a.term.Run(cmd) + if err != nil { + return errors.Wrapf(err, "zero superblock on %s", dev) + } + break + } + } + + return nil +} diff --git a/pkg/compute/baremetal/diskconfig.go b/pkg/compute/baremetal/diskconfig.go index b1a9f895377..87e5f5f5dcd 100644 --- a/pkg/compute/baremetal/diskconfig.go +++ 
b/pkg/compute/baremetal/diskconfig.go @@ -199,9 +199,9 @@ func MeetConfig( return fmt.Errorf("%v more than 1 storages drivers", storageDrvs) } driver := storageDrvs.List()[0] - if conf.Conf != DISK_CONF_NONE && !DISK_DRIVERS_RAID.Has(driver) { - return fmt.Errorf("BaremetalStorage driver %s not support RAID", driver) - } + //if conf.Conf != DISK_CONF_NONE && !DISK_DRIVERS_RAID.Has(driver) { + // return fmt.Errorf("BaremetalStorage driver %s not support RAID", driver) + //} minDisk := GetMinDiskRequirement(conf.Conf) if len(storages) < minDisk { @@ -365,6 +365,7 @@ func getLayoutConfig(layouts []Layout, onlyRaidDisk bool) []*api.BaremetalDiskCo func CalculateLayout(confs []*api.BaremetalDiskConfig, storages []*BaremetalStorage) (layouts []Layout, err error) { var confIdx = 0 + var softRaidIdx = 0 for len(storages) > 0 { var conf *api.BaremetalDiskConfig if confIdx < len(confs) { @@ -374,8 +375,16 @@ func CalculateLayout(confs []*api.BaremetalDiskConfig, storages []*BaremetalStor noneConf, _ := ParseDiskConfig(DISK_CONF_NONE) conf = &noneConf } - selected, restStorges, rErr := RetrieveStorages(conf, storages) - storages = restStorges + + // is soft raid + if DISK_DRIVERS_SOFT_RAID.Has(conf.Driver) && conf.Conf != DISK_CONF_NONE { + idx := softRaidIdx + conf.SoftRaidIdx = &idx + softRaidIdx += 1 + } + + selected, restStorages, rErr := RetrieveStorages(conf, storages) + storages = restStorages if len(selected) == 0 { err = errors.Wrapf(rErr, "not found matched storages by config: %#v", conf) return @@ -596,12 +605,13 @@ func GetDiskSpecV2(storages []*BaremetalStorage) api.DiskDriverSpec { } type DiskConfiguration struct { - Driver string - Adapter int - RaidConfig string - Block int64 - Size int64 - DiskType string + Driver string + Adapter int + RaidConfig string + Block int64 + Size int64 + DiskType string + SoftRaidIdx *int } func GetDiskConfigurations(layouts []Layout) []DiskConfiguration { @@ -614,34 +624,37 @@ func GetDiskConfigurations(layouts []Layout) 
[]DiskConfiguration { if raidConf == DISK_CONF_NONE { for _, d := range rr.Disks { disks = append(disks, DiskConfiguration{ - Driver: driver, - Adapter: adapter, - RaidConfig: raidConf, - Block: block, - Size: d.Size, - DiskType: rr.Conf.Type, + Driver: driver, + Adapter: adapter, + RaidConfig: raidConf, + Block: block, + Size: d.Size, + DiskType: rr.Conf.Type, + SoftRaidIdx: rr.Conf.SoftRaidIdx, }) } } else { if len(rr.Conf.Size) != 0 { for _, sz := range rr.Conf.Size { disks = append(disks, DiskConfiguration{ - Driver: driver, - Adapter: adapter, - RaidConfig: raidConf, - Block: block, - Size: sz, - DiskType: rr.Conf.Type, + Driver: driver, + Adapter: adapter, + RaidConfig: raidConf, + Block: block, + Size: sz, + DiskType: rr.Conf.Type, + SoftRaidIdx: rr.Conf.SoftRaidIdx, }) } } else { disks = append(disks, DiskConfiguration{ - Driver: driver, - Adapter: adapter, - RaidConfig: raidConf, - Block: block, - Size: rr.Size, - DiskType: rr.Conf.Type, + Driver: driver, + Adapter: adapter, + RaidConfig: raidConf, + Block: block, + Size: rr.Size, + DiskType: rr.Conf.Type, + SoftRaidIdx: rr.Conf.SoftRaidIdx, }) } } diff --git a/pkg/compute/baremetal/types.go b/pkg/compute/baremetal/types.go index 9cd28a7fbd5..faa78622a52 100644 --- a/pkg/compute/baremetal/types.go +++ b/pkg/compute/baremetal/types.go @@ -56,6 +56,8 @@ var ( DISK_DRIVERS_RAID = api.DISK_DRIVERS_RAID + DISK_DRIVERS_SOFT_RAID = api.DISK_DRIVERS_SOFT_RAID + DISK_DRIVERS = api.DISK_DRIVERS ) diff --git a/pkg/hostman/guestfs/sshpart/sshpart.go b/pkg/hostman/guestfs/sshpart/sshpart.go index 7d8cb49c323..fc1c2733d46 100644 --- a/pkg/hostman/guestfs/sshpart/sshpart.go +++ b/pkg/hostman/guestfs/sshpart/sshpart.go @@ -59,7 +59,12 @@ func (p *SSHPartition) GetMountPath() string { } func (p *SSHPartition) GetFsFormat() (string, error) { - cmd := fmt.Sprintf("/lib/mos/partfs.sh %s", p.partDev) + var cmd string + if strings.HasPrefix(p.partDev, "/dev/md") { + cmd = fmt.Sprintf("blkid -o value -s TYPE %s", p.partDev) + } 
else { + cmd = fmt.Sprintf("/lib/mos/partfs.sh %s", p.partDev) + } ret, err := p.term.Run(cmd) if err != nil { return "", err @@ -375,7 +380,7 @@ func (p *SSHPartition) ListDir(sPath string, caseInsensitive bool) []string { } func (p *SSHPartition) osChown(sPath string, uid, gid int) error { - cmd := fmt.Sprintf("chown %d.%d %s", uid, gid, sPath) + cmd := fmt.Sprintf("chown %d:%d %s", uid, gid, sPath) _, err := p.term.Run(cmd) return err } diff --git a/pkg/util/ssh/ssh.go b/pkg/util/ssh/ssh.go index 2bb653707e4..170b72aeb22 100644 --- a/pkg/util/ssh/ssh.go +++ b/pkg/util/ssh/ssh.go @@ -207,7 +207,6 @@ func (s *Client) run(parseOutput bool, cmds []string, input io.Reader, withPty b ret = append(ret, stdOut.String()) } } - return ret, nil }