diff --git a/src/placement/jump_map.c b/src/placement/jump_map.c
index 56f0643555e..a8c6d52153c 100644
--- a/src/placement/jump_map.c
+++ b/src/placement/jump_map.c
@@ -1,7 +1,7 @@
 /**
  *
  * (C) Copyright 2016-2024 Intel Corporation.
- * (C) Copyright 2025 Hewlett Packard Enterprise Development LP
+ * (C) Copyright 2025-2026 Hewlett Packard Enterprise Development LP
  *
  * SPDX-License-Identifier: BSD-2-Clause-Patent
  */
@@ -734,6 +734,13 @@ get_object_layout(struct pl_jump_map *jmap, uint32_t layout_ver, struct pl_obj_l
                         setbit(dom_cur_grp_real, domain - root);
                         if (pool_target_down(target))
                                 layout->ol_shards[k].po_rebuilding = 1;
+
+                        if (pool_target_is_down2up(target)) {
+                                if (gen_mode == PRE_REBUILD)
+                                        layout->ol_shards[k].po_rebuilding = 1;
+                                else
+                                        layout->ol_shards[k].po_reintegrating = 1;
+                        }
                 }
 
                 if (is_extending != NULL && pool_target_is_up_or_drain(target))
diff --git a/src/placement/pl_map_common.c b/src/placement/pl_map_common.c
index 5afe0691a37..52e9ee93371 100644
--- a/src/placement/pl_map_common.c
+++ b/src/placement/pl_map_common.c
@@ -1,5 +1,6 @@
 /**
  * (C) Copyright 2016-2024 Intel Corporation.
+ * (C) Copyright 2026 Hewlett Packard Enterprise Development LP
  *
  * SPDX-License-Identifier: BSD-2-Clause-Patent
  */
@@ -255,13 +256,11 @@ is_comp_avaible(struct pool_component *comp, uint32_t allow_version,
                 status = PO_COMP_ST_UPIN;
         } else if (status == PO_COMP_ST_UP) {
                 if (comp->co_flags & PO_COMPF_DOWN2UP) {
-                        /* PO_COMP_ST_UP status with PO_COMPF_DOWN2UP flag
-                         * is the case of delay_rebuild exclude+reint.
-                         * Cannot mark it as UPIN to avoid it be used for
-                         * rebuild enumerate/fetch, as the data will be
-                         * discarded in reintegrate.
-                         */
-                        /* status = PO_COMP_ST_UPIN; */
+                        /* Delay_rebuild exclude+reint: treat as UPIN here,
+                         * since placement now marks such targets as
+                         * rebuilding or reintegrating in the layout.
+                         */
+                        status = PO_COMP_ST_UPIN;
                 } else {
                         if (comp->co_fseq <= 1)
                                 status = PO_COMP_ST_NEW;
@@ -394,9 +393,12 @@ determine_valid_spares(struct pool_target *spare_tgt, struct daos_obj_md *md,
         if (spare_avail) {
                 /* The selected spare target is up and ready */
                 l_shard->po_target = spare_tgt->ta_comp.co_id;
-                l_shard->po_fseq = f_shard->fs_fseq;
-                l_shard->po_rank = spare_tgt->ta_comp.co_rank;
-                l_shard->po_index = spare_tgt->ta_comp.co_index;
+                l_shard->po_fseq   = f_shard->fs_fseq;
+                l_shard->po_rank   = spare_tgt->ta_comp.co_rank;
+                l_shard->po_index  = spare_tgt->ta_comp.co_index;
+
+                /* Remember whether this spare is coming back up (down2up). */
+                f_shard->fs_down2up = pool_target_is_down2up(spare_tgt) ? 1 : 0;
 
                 /*
                  * Mark the shard as 'rebuilding' so that read will skip this shard.
@@ -406,6 +408,9 @@ determine_valid_spares(struct pool_target *spare_tgt, struct daos_obj_md *md,
                     f_shard->fs_status == PO_COMP_ST_DRAIN || f_shard->fs_down2up ||
                     pool_target_down(spare_tgt))
                         l_shard->po_rebuilding = 1;
+
+                if (f_shard->fs_down2up && gen_mode != PRE_REBUILD)
+                        l_shard->po_reintegrating = 1;
         } else {
                 l_shard->po_shard = -1;
                 l_shard->po_target = -1;
diff --git a/src/rebuild/srv.c b/src/rebuild/srv.c
index aeb8b2daa87..d2abc1eea74 100644
--- a/src/rebuild/srv.c
+++ b/src/rebuild/srv.c
@@ -1998,16 +1998,8 @@ rebuild_task_ult(void *arg)
         uint32_t                                map_dist_ver = 0;
         struct rebuild_global_pool_tracker      *rgt = NULL;
         d_rank_t                                myrank;
-        uint64_t                                cur_ts = 0;
         uint32_t                                obj_reclaim_ver = 0;
         int                                     rc;
 
-        cur_ts = daos_gettime_coarse();
-        D_ASSERT(task->dst_schedule_time != (uint64_t)-1);
-        if (cur_ts < task->dst_schedule_time) {
-                D_INFO("rebuild task sleep " DF_U64 " second\n", task->dst_schedule_time - cur_ts);
-                dss_sleep((task->dst_schedule_time - cur_ts) * 1000);
-        }
-
         rc = ds_pool_lookup(task->dst_pool_uuid, &pool);
         if (pool == NULL) {
@@ -2209,6 +2201,8 @@ rebuild_ults(void *arg)
         while (!d_list_empty(&rebuild_gst.rg_queue_list) ||
                !d_list_empty(&rebuild_gst.rg_running_list)) {
+                uint64_t now;
+
                 if (rebuild_gst.rg_abort) {
                         D_INFO("abort rebuilds\n");
                         break;
                 }
@@ -2222,6 +2216,7 @@ rebuild_ults(void *arg)
                         continue;
                 }
 
+                now = daos_gettime_coarse();
                 task = d_list_entry(rebuild_gst.rg_queue_list.next, struct rebuild_task, dst_list);
                 while (&rebuild_gst.rg_queue_list != &task->dst_list) {
                         /* If a pool is already handling a rebuild operation,
@@ -2229,6 +2224,7 @@ rebuild_ults(void *arg)
                          * one completes
                          */
                         if (pool_is_rebuilding(task->dst_pool_uuid) ||
+                            task->dst_schedule_time > now ||
                             task->dst_schedule_time == (uint64_t)-1) {
                                 struct rebuild_task     *head_task = task;
 
@@ -2261,7 +2257,7 @@ rebuild_ults(void *arg)
                         }
                 }
 
-                dss_sleep(0);
+                dss_sleep(100);
         }
 
         /* If there are still rebuild task in queue and running list, then
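
Note for reviewers: the src/rebuild/srv.c change moves the rebuild delay out of the per-task ULT (which previously slept until dst_schedule_time) and into the rebuild_ults() dispatch loop, which now skips tasks whose dst_schedule_time is still in the future and retries on the next pass after dss_sleep(100). The placement changes appear complementary: a DOWN2UP (delay_rebuild exclude+reint) target is flagged po_rebuilding when the layout is generated in PRE_REBUILD mode and po_reintegrating otherwise, which seems to be what makes it safe to treat such components as UPIN in is_comp_avaible(). A minimal, self-contained sketch of the deferred-dispatch pattern follows; struct task, now_sec(), and dispatch_due() are hypothetical stand-ins, not DAOS APIs:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for the DAOS task/queue types involved. */
struct task {
        const char *pool;
        uint64_t    schedule_time;  /* absolute seconds; (uint64_t)-1 = blocked */
        int         launched;
};

static uint64_t
now_sec(void)
{
        return (uint64_t)time(NULL);  /* coarse clock, like daos_gettime_coarse() */
}

/*
 * One scheduler pass: launch every task that is due, skip the rest.
 * Mirrors the patch: the loop samples the clock once and compares
 * schedule_time > now, instead of each task sleeping out its own delay.
 */
static int
dispatch_due(struct task *queue, int n)
{
        uint64_t now = now_sec();
        int      count = 0;
        int      i;

        for (i = 0; i < n; i++) {
                if (queue[i].launched)
                        continue;
                if (queue[i].schedule_time == (uint64_t)-1 || /* blocked */
                    queue[i].schedule_time > now)             /* not due yet */
                        continue;
                printf("launching rebuild for %s\n", queue[i].pool);
                queue[i].launched = 1;
                count++;
        }
        return count;
}

int
main(void)
{
        struct task queue[] = {
                { "pool-a", 0, 0 },              /* due immediately */
                { "pool-b", now_sec() + 5, 0 },  /* due in 5 seconds */
                { "pool-c", (uint64_t)-1, 0 },   /* blocked until rescheduled */
        };

        /* The real loop yields between passes (dss_sleep(100)); one pass
         * is enough to show the skip logic here.
         */
        dispatch_due(queue, 3);  /* launches pool-a only */
        return 0;
}
```

Centralizing the delay in the dispatcher means a deferred task never holds a sleeping ULT, so rg_abort and queue changes take effect on the next 100 ms poll instead of waiting for a per-task sleep to expire.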