diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/api/component/button/CsvDownloadButtonPanel.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/api/component/button/CsvDownloadButtonPanel.java index e958b5b4700..7e4f4366584 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/api/component/button/CsvDownloadButtonPanel.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/api/component/button/CsvDownloadButtonPanel.java @@ -13,12 +13,12 @@ import com.evolveum.midpoint.gui.api.util.WebModelServiceUtils; import com.evolveum.midpoint.prism.Referencable; import com.evolveum.midpoint.gui.impl.component.data.provider.SelectableBeanContainerDataProvider; +import com.evolveum.midpoint.gui.impl.component.data.provider.StreamingCsvDataExporter; import org.apache.commons.lang3.StringUtils; import org.apache.wicket.ajax.AjaxRequestTarget; import org.apache.wicket.extensions.markup.html.repeater.data.table.DataTable; import org.apache.wicket.extensions.markup.html.repeater.data.table.IColumn; -import org.apache.wicket.extensions.markup.html.repeater.data.table.export.CSVDataExporter; import org.apache.wicket.extensions.markup.html.repeater.data.table.export.ExportToolbar; import org.apache.wicket.extensions.markup.html.repeater.data.table.export.IExportableColumn; import org.apache.wicket.markup.repeater.data.IDataProvider; @@ -64,7 +64,7 @@ public CsvDownloadButtonPanel(String id) { private static final long serialVersionUID = 1L; private void initLayout() { - CSVDataExporter csvDataExporter = new CSVDataExporter() { + StreamingCsvDataExporter csvDataExporter = new StreamingCsvDataExporter(getPageBase()) { private static final long serialVersionUID = 1L; @Override @@ -116,10 +116,16 @@ public IResourceStream getResourceStream() { } public String getFileName() { + String fileName; if (StringUtils.isEmpty(name.getObject())) { - return CsvDownloadButtonPanel.this.getFilename(); + fileName = 
CsvDownloadButtonPanel.this.getFilename(); + } else { + fileName = name.getObject(); + } + if (!fileName.toLowerCase().endsWith(".csv")) { + fileName += ".csv"; } - return name.getObject(); + return fileName; } }; diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/BaseSearchDataProvider.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/BaseSearchDataProvider.java index a0e68e7ac4a..d3be50b39ec 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/BaseSearchDataProvider.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/BaseSearchDataProvider.java @@ -16,7 +16,9 @@ import com.evolveum.midpoint.schema.GetOperationOptions; import com.evolveum.midpoint.schema.SelectorOptions; +import com.evolveum.midpoint.schema.ObjectHandler; import com.evolveum.midpoint.schema.result.OperationResult; +import com.evolveum.midpoint.task.api.Task; import com.evolveum.midpoint.util.exception.CommonException; import com.evolveum.midpoint.util.exception.SystemException; @@ -31,7 +33,8 @@ * @author lazyman */ public abstract class BaseSearchDataProvider - extends BaseSortableDataProvider { + extends BaseSortableDataProvider + implements IterativeExportSupport { private final IModel> search; @@ -126,4 +129,25 @@ public void detach() { super.detach(); search.detach(); } + + /** + * Default implementation throws UnsupportedOperationException. + * Subclasses should override this method to support streaming CSV export. + */ + @Override + public void exportIterative( + ObjectHandler handler, + Task task, + OperationResult result) throws CommonException { + throw new UnsupportedOperationException("Subclass must override exportIterative"); + } + + /** + * Returns false by default. Subclasses that implement exportIterative() + * should override this to return true. 
+ */ + @Override + public boolean supportsIterativeExport() { + return false; + } } diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/ContainerListDataProvider.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/ContainerListDataProvider.java index 55101a43a66..bc088c5cbf8 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/ContainerListDataProvider.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/ContainerListDataProvider.java @@ -20,6 +20,7 @@ import com.evolveum.midpoint.gui.api.factory.wrapper.PrismContainerWrapperFactory; import com.evolveum.midpoint.gui.api.factory.wrapper.WrapperContext; import com.evolveum.midpoint.gui.api.prism.wrapper.PrismContainerValueWrapper; +import com.evolveum.midpoint.gui.impl.prism.wrapper.PrismContainerValueWrapperImpl; import com.evolveum.midpoint.gui.api.util.WebComponentUtil; import com.evolveum.midpoint.gui.api.util.WebModelServiceUtils; import com.evolveum.midpoint.gui.impl.component.search.Search; @@ -30,7 +31,10 @@ import com.evolveum.midpoint.schema.SelectorOptions; import com.evolveum.midpoint.schema.result.OperationResult; import com.evolveum.midpoint.task.api.Task; +import com.evolveum.midpoint.schema.ObjectHandler; +import com.evolveum.midpoint.util.exception.CommonException; import com.evolveum.midpoint.util.exception.SchemaException; +import com.evolveum.midpoint.util.exception.SystemException; import com.evolveum.midpoint.util.logging.LoggingUtils; import com.evolveum.midpoint.util.logging.Trace; import com.evolveum.midpoint.util.logging.TraceManager; @@ -117,6 +121,15 @@ protected PrismContainerValueWrapper createWrapper(C object, Task task, Opera return (PrismContainerValueWrapper) factory.createValueWrapper(null, object.asPrismContainerValue(), ValueStatus.NOT_CHANGED, context); } + /** + * Creates a lightweight wrapper for export purposes. 
+ * This skips child wrapper creation which is the main performance bottleneck. + * The wrapper only holds the PrismContainerValue - columns access data via getRealValue(). + */ + protected PrismContainerValueWrapper createExportWrapper(C object) { + return new PrismContainerValueWrapperImpl<>(null, object.asPrismContainerValue(), ValueStatus.NOT_CHANGED); + } + @Override protected int internalSize() { LOGGER.trace("begin::internalSize()"); @@ -146,4 +159,53 @@ public void detach() { super.detach(); getAvailableData().clear(); } + + @Override + public boolean supportsIterativeExport() { + return true; + } + + /** + * Streaming export using JDBC cursor-based streaming. + * This method does not load all data into memory - uses true JDBC streaming. + * Streaming is enabled by setting iterationPageSize to -1. + * Uses lightweight wrapper to skip expensive child wrapper creation. + */ + @Override + public void exportIterative( + ObjectHandler> handler, + Task task, + OperationResult result) throws CommonException { + + ObjectQuery query = getQuery(); + if (query == null) { + query = getPrismContext().queryFactory().createQuery(); + } + // Set ordering from current sort settings (no offset/limit for full export) + query.setPaging(createPaging(0, Integer.MAX_VALUE)); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("exportIterative: Query {} with {}", getType().getSimpleName(), query.debugDump()); + } + + // Enable JDBC streaming mode by setting iterationPageSize to -1 + Collection> streamingOptions = + SelectorOptions.updateRootOptions(options, + opt -> opt.setIterationPageSize(-1), GetOperationOptions::new); + + getModelService().searchContainersIterative( + getType(), + query, + (object, opResult) -> { + PrismContainerValueWrapper wrapper = createExportWrapper(object); + if (wrapper != null) { + return handler.handle(wrapper, opResult); + } + return true; + }, + streamingOptions, + task, + result + ); + } } diff --git 
a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/IterativeExportSupport.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/IterativeExportSupport.java new file mode 100644 index 00000000000..a7533797101 --- /dev/null +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/IterativeExportSupport.java @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010-2026 Evolveum and contributors + * + * Licensed under the EUPL-1.2 or later. + */ + +package com.evolveum.midpoint.gui.impl.component.data.provider; + +import com.evolveum.midpoint.schema.ObjectHandler; +import com.evolveum.midpoint.schema.result.OperationResult; +import com.evolveum.midpoint.task.api.Task; +import com.evolveum.midpoint.util.exception.CommonException; + +/** + * Interface for DataProviders that support iterative export. + * This allows streaming export without loading all data into memory. + * + * @param The type of items being exported + */ +public interface IterativeExportSupport { + + /** + * Execute iterative search and pass each item to the handler. + * The search will stop if the handler returns false. + * + * @param handler Handler to process each item. Returns true to continue, false to stop. + * @param task Task for the operation + * @param result Operation result + * @throws CommonException if an error occurs during the search + */ + void exportIterative(ObjectHandler handler, Task task, OperationResult result) throws CommonException; + + /** + * Returns true if this provider actually supports iterative export. + * Default is true, but BaseSearchDataProvider overrides to return false + * so that subclasses must explicitly enable support. 
+ */ + default boolean supportsIterativeExport() { + return true; + } +} diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanContainerDataProvider.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanContainerDataProvider.java index a5e2f8a104b..229d230c6f2 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanContainerDataProvider.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanContainerDataProvider.java @@ -15,6 +15,10 @@ import com.evolveum.midpoint.gui.api.util.WebComponentUtil; import com.evolveum.midpoint.gui.api.util.WebModelServiceUtils; +import com.evolveum.midpoint.prism.PrismContext; +import com.evolveum.midpoint.schema.ObjectHandler; +import com.evolveum.midpoint.web.component.util.SelectableBean; + import org.apache.wicket.Component; import org.apache.wicket.model.IModel; import org.jetbrains.annotations.NotNull; @@ -71,4 +75,51 @@ protected void addCachedSize(Map cache, CachedSize new protected boolean match(C selectedValue, C foundValue) { return selectedValue.asPrismContainerValue().equivalent(foundValue.asPrismContainerValue()); } + + @Override + public boolean supportsIterativeExport() { + return true; + } + + /** + * Streaming export using JDBC cursor-based streaming. + * This method does not load all data into memory - uses true JDBC streaming. + * Streaming is enabled by setting iterationPageSize to -1. 
+ */ + @Override + public void exportIterative( + ObjectHandler> handler, + Task task, + OperationResult result) throws CommonException { + + ObjectQuery query = getQuery(); + if (query == null) { + query = PrismContext.get().queryFactory().createQuery(); + } + // Set ordering from current sort settings (no offset/limit for full export) + query.setPaging(createPaging(0, Integer.MAX_VALUE)); + + // Enable JDBC streaming mode by setting iterationPageSize to -1 + Collection> streamingOptions = + SelectorOptions.updateRootOptions(getSearchOptions(), + opt -> opt.setIterationPageSize(-1), GetOperationOptions::new); + + searchObjectsIterative(getType(), query, + (object, opResult) -> { + SelectableBean wrapper = createDataObjectWrapper(object); + return handler.handle(wrapper, opResult); + }, + streamingOptions, task, result); + } + + /** + * Override this method to use a different iterative search implementation. + * Default implementation uses ModelService.searchContainersIterative(). + */ + protected void searchObjectsIterative(Class type, ObjectQuery query, + ObjectHandler handler, + Collection> options, + Task task, OperationResult result) throws CommonException { + getModelService().searchContainersIterative(type, query, handler, options, task, result); + } } diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanObjectDataProvider.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanObjectDataProvider.java index 7d30d6eb84a..7e2ed3df853 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanObjectDataProvider.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/SelectableBeanObjectDataProvider.java @@ -25,6 +25,7 @@ import com.evolveum.midpoint.prism.PrismObject; import com.evolveum.midpoint.prism.query.ObjectQuery; import 
com.evolveum.midpoint.schema.GetOperationOptions; +import com.evolveum.midpoint.schema.ObjectHandler; import com.evolveum.midpoint.schema.SelectorOptions; import com.evolveum.midpoint.schema.result.OperationResult; import com.evolveum.midpoint.task.api.Task; @@ -129,4 +130,50 @@ public ObjectPaging createPaging(long offset, long pageSize) { public void setTaskConsumer(Consumer taskConsumer) { this.taskConsumer = taskConsumer; } + + @Override + public boolean supportsIterativeExport() { + return true; + } + + /** + * Streaming export using searchObjectsIterative with JDBC streaming. + * This method does not load all data into memory - uses true JDBC streaming. + * Streaming is enabled by setting iterationPageSize to -1. + */ + @Override + public void exportIterative( + ObjectHandler> handler, + Task task, + OperationResult result) throws CommonException { + + ObjectQuery query = getQuery(); + if (query == null) { + query = getPrismContext().queryFactory().createQuery(); + } + // Set ordering from current sort settings (no offset/limit for full export) + query.setPaging(createPaging(0, Integer.MAX_VALUE)); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("exportIterative: Query {} with {}", getType().getSimpleName(), query.debugDump()); + } + + // Enable JDBC streaming mode by setting iterationPageSize to -1 + Collection> streamingOptions = + SelectorOptions.updateRootOptions(getSearchOptions(), + opt -> opt.setIterationPageSize(-1), GetOperationOptions::new); + + getModelService().searchObjectsIterative( + getType(), + query, + (object, opResult) -> { + O objectable = object.asObjectable(); + SelectableBean wrapper = createDataObjectWrapper(objectable); + return handler.handle(wrapper, opResult); + }, + streamingOptions, + task, + result + ); + } } diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/StreamingCsvDataExporter.java 
b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/StreamingCsvDataExporter.java new file mode 100644 index 00000000000..50e1a601504 --- /dev/null +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/component/data/provider/StreamingCsvDataExporter.java @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2010-2026 Evolveum and contributors + * + * Licensed under the EUPL-1.2 or later. + */ + +package com.evolveum.midpoint.gui.impl.component.data.provider; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.Charset; +import java.util.List; + +import org.apache.wicket.Session; +import org.apache.wicket.extensions.markup.html.repeater.data.table.export.CSVDataExporter; +import org.apache.wicket.extensions.markup.html.repeater.data.table.export.IExportableColumn; +import org.apache.wicket.markup.repeater.data.IDataProvider; +import org.apache.wicket.model.IModel; +import org.apache.wicket.util.convert.IConverter; + +import com.evolveum.midpoint.gui.api.page.PageBase; +import com.evolveum.midpoint.schema.result.OperationResult; +import com.evolveum.midpoint.task.api.Task; +import com.evolveum.midpoint.util.exception.CommonException; +import com.evolveum.midpoint.util.logging.LoggingUtils; +import com.evolveum.midpoint.util.logging.Trace; +import com.evolveum.midpoint.util.logging.TraceManager; + +/** + * CSV data exporter that uses streaming (iterative) export to avoid loading all data into memory. + * This extends Wicket's CSVDataExporter and overrides exportData to use IterativeExportSupport + * when available. 
+ */ +public class StreamingCsvDataExporter extends CSVDataExporter { + + private static final long serialVersionUID = 1L; + + private static final Trace LOGGER = TraceManager.getTrace(StreamingCsvDataExporter.class); + private static final String DOT_CLASS = StreamingCsvDataExporter.class.getName() + "."; + private static final String OPERATION_EXPORT_DATA = DOT_CLASS + "exportData"; + + private final PageBase pageBase; + + public StreamingCsvDataExporter(PageBase pageBase) { + this.pageBase = pageBase; + } + + @Override + public void exportData(IDataProvider dataProvider, + List> columns, OutputStream outputStream) + throws IOException { + + if (!(dataProvider instanceof IterativeExportSupport) + || !((IterativeExportSupport) dataProvider).supportsIterativeExport()) { + // Fall back to standard export if provider doesn't support iterative export + LOGGER.info("DataProvider {} does not support iterative export, falling back to standard export", + dataProvider.getClass().getName()); + super.exportData(dataProvider, columns, outputStream); + return; + } + + @SuppressWarnings("unchecked") + IterativeExportSupport iterativeProvider = (IterativeExportSupport) dataProvider; + + Task task = pageBase.createSimpleTask(OPERATION_EXPORT_DATA); + OperationResult result = task.getResult(); + + try (CsvGrid grid = new CsvGrid(new OutputStreamWriter(outputStream, Charset.forName(getCharacterSet())))) { + writeHeaders(columns, grid); + writeDataIterative(dataProvider, iterativeProvider, columns, grid, task, result); + } catch (CommonException e) { + LoggingUtils.logUnexpectedException(LOGGER, "Error during iterative CSV export", e); + throw new IOException("Error during iterative CSV export: " + e.getMessage(), e); + } finally { + result.computeStatusIfUnknown(); + } + } + + private void writeHeaders(List> columns, CsvGrid grid) + throws IOException { + if (isExportHeadersEnabled()) { + for (IExportableColumn col : columns) { + IModel displayModel = col.getDisplayModel(); + 
String display = wrapModel(displayModel).getObject(); + grid.cell(quoteValue(display)); + } + grid.row(); + } + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private void writeDataIterative( + IDataProvider dataProvider, + IterativeExportSupport iterativeProvider, + List> columns, + CsvGrid grid, + Task task, + OperationResult result) throws CommonException { + + iterativeProvider.exportIterative( + (item, opResult) -> { + try { + writeRow(dataProvider, columns, item, grid); + return true; + } catch (IOException e) { + LOGGER.error("Error writing CSV row", e); + return false; + } + }, + task, + result + ); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private void writeRow(IDataProvider dataProvider, + List> columns, T row, CsvGrid grid) throws IOException { + for (IExportableColumn col : columns) { + IModel dataModel = col.getDataModel(dataProvider.model(row)); + Object value = wrapModel(dataModel).getObject(); + if (value != null) { + Class c = value.getClass(); + String s; + IConverter converter = getConverterLocator().getConverter(c); + if (converter == null) { + s = value.toString(); + } else { + s = converter.convertToString(value, Session.get().getLocale()); + } + grid.cell(quoteValue(s)); + } else { + grid.cell(""); + } + } + grid.row(); + } + + /** + * Simple CSV grid writer that handles cells and rows. 
+ */ + private class CsvGrid implements AutoCloseable { + private final Writer writer; + private boolean first = true; + + public CsvGrid(Writer writer) { + this.writer = writer; + } + + public void cell(String value) throws IOException { + if (first) { + first = false; + } else { + writer.write(getDelimiter()); + } + writer.write(value); + } + + public void row() throws IOException { + writer.write("\r\n"); + writer.flush(); // Flush after each row for streaming + first = true; + } + + @Override + public void close() throws IOException { + writer.close(); + } + } +} diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemObjectDisplayNameColumn.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemObjectDisplayNameColumn.java index f8395a02249..1f8d6cf15fb 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemObjectDisplayNameColumn.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemObjectDisplayNameColumn.java @@ -58,8 +58,16 @@ public IModel getDataModel(IModel> rowModel) { AccessCertificationCaseType certCase = CertCampaignTypeUtil.getCase(unwrapRowModel(rowModel)); - PrismObject resolvedObject = WebModelServiceUtils.loadObject(certCase.getObjectRef(), null, context.getPageBase()); - return WebComponentUtil.getDisplayName(resolvedObject, true); + if (certCase == null || certCase.getObjectRef() == null) { + return ""; + } + // Use pre-loaded object from reference if available (set by beforeTransformation optimization) + PrismObject resolvedObject = certCase.getObjectRef().getObject(); + if (resolvedObject == null) { + // Fallback to loading object (for non-optimized paths) + resolvedObject = WebModelServiceUtils.loadObject(certCase.getObjectRef(), null, context.getPageBase()); + } + return WebComponentUtil.getDisplayName(resolvedObject, 
true); } }; } diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemTargetDisplayNameColumn.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemTargetDisplayNameColumn.java index 1a08d35587a..b5494ea6fb9 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemTargetDisplayNameColumn.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/column/CertItemTargetDisplayNameColumn.java @@ -56,10 +56,17 @@ public IModel getDataModel(IModel> rowModel) { AccessCertificationCaseType certCase = CertCampaignTypeUtil.getCase(unwrapRowModel(rowModel)); - PrismObject resolvedObject = WebModelServiceUtils.loadObject(certCase.getTargetRef(), null, context.getPageBase()); - return WebComponentUtil.getDisplayName(resolvedObject, true); + if (certCase == null || certCase.getTargetRef() == null) { + return ""; + } + // Use pre-loaded object from reference if available (set by beforeTransformation optimization) + PrismObject resolvedObject = certCase.getTargetRef().getObject(); + if (resolvedObject == null) { + // Fallback to loading object (for non-optimized paths) + resolvedObject = WebModelServiceUtils.loadObject(certCase.getTargetRef(), null, context.getPageBase()); + } + return WebComponentUtil.getDisplayName(resolvedObject, true); } }; } } - diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/component/CertificationWorkItemTable.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/component/CertificationWorkItemTable.java index e9f87b86d92..61257cd545d 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/component/CertificationWorkItemTable.java +++ 
b/gui/admin-gui/src/main/java/com/evolveum/midpoint/gui/impl/page/admin/certification/component/CertificationWorkItemTable.java @@ -13,6 +13,9 @@ import java.util.*; import java.util.stream.Collectors; +import org.apache.wicket.extensions.markup.html.repeater.util.SortParam; +import org.jetbrains.annotations.NotNull; + import com.evolveum.midpoint.gui.impl.component.action.CertItemResolveAction; import com.evolveum.midpoint.gui.impl.page.admin.certification.column.AbstractGuiColumn; @@ -37,8 +40,12 @@ import com.evolveum.midpoint.gui.impl.page.admin.certification.helpers.CertMiscUtil; import com.evolveum.midpoint.gui.impl.util.IconAndStylesUtil; import com.evolveum.midpoint.model.api.authentication.CompiledObjectCollectionView; +import com.evolveum.midpoint.prism.PrismConstants; import com.evolveum.midpoint.prism.PrismContext; +import com.evolveum.midpoint.prism.path.ItemPath; +import com.evolveum.midpoint.prism.query.ObjectOrdering; import com.evolveum.midpoint.prism.query.ObjectQuery; +import com.evolveum.midpoint.prism.query.OrderDirection; import com.evolveum.midpoint.schema.GetOperationOptions; import com.evolveum.midpoint.schema.SelectorOptions; import com.evolveum.midpoint.schema.result.OperationResult; @@ -219,8 +226,25 @@ protected ObjectQuery getCustomizeContentQuery() { return getOpenCertWorkItemsQuery(); } + @Override + protected @NotNull List createObjectOrderings(SortParam sortParam) { + if (sortParam != null && sortParam.getProperty() != null) { + return super.createObjectOrderings(sortParam); + } + // Default sort: match PK order (ownerOid, accessCertCaseCid, cid) for index utilization + PrismContext prismContext = PrismContext.get(); + ItemPath casePath = ItemPath.create(PrismConstants.T_PARENT); + ItemPath campaignPath = casePath.append(PrismConstants.T_PARENT); + return List.of( + prismContext.queryFactory().createOrdering( + campaignPath.append(PrismConstants.T_ID), OrderDirection.ASCENDING), + prismContext.queryFactory().createOrdering( + 
casePath.append(PrismConstants.T_ID), OrderDirection.ASCENDING), + prismContext.queryFactory().createOrdering( + PrismConstants.T_ID, OrderDirection.ASCENDING) + ); + } }; -// provider.setSort(CaseWorkItemType.F_DEADLINE.getLocalPart(), SortOrder.DESCENDING); return provider; } diff --git a/gui/admin-gui/src/main/java/com/evolveum/midpoint/web/page/admin/reports/component/AuditLogViewerPanel.java b/gui/admin-gui/src/main/java/com/evolveum/midpoint/web/page/admin/reports/component/AuditLogViewerPanel.java index 3353cdfb391..eb8185c50a1 100644 --- a/gui/admin-gui/src/main/java/com/evolveum/midpoint/web/page/admin/reports/component/AuditLogViewerPanel.java +++ b/gui/admin-gui/src/main/java/com/evolveum/midpoint/web/page/admin/reports/component/AuditLogViewerPanel.java @@ -54,6 +54,7 @@ import com.evolveum.midpoint.prism.query.ObjectQuery; import com.evolveum.midpoint.prism.query.OrderDirection; import com.evolveum.midpoint.schema.GetOperationOptions; +import com.evolveum.midpoint.schema.ObjectHandler; import com.evolveum.midpoint.schema.SelectorOptions; import com.evolveum.midpoint.schema.expression.VariablesMap; import com.evolveum.midpoint.schema.result.OperationResult; @@ -172,6 +173,14 @@ protected List createObjectOrderings(SortParam sortParam protected ObjectQuery getCustomizeContentQuery() { return AuditLogViewerPanel.this.getCustomizeContentQuery(); } + + @Override + protected void searchObjectsIterative(Class type, ObjectQuery query, + ObjectHandler handler, + Collection> options, + Task task, OperationResult result) throws CommonException { + getPageBase().getModelAuditService().searchObjectsIterative(query, options, handler::handle, task, result); + } }; provider.setSort(AuditEventRecordType.F_TIMESTAMP.getLocalPart(), SortOrder.DESCENDING); return provider; diff --git a/infra/common/src/main/java/com/evolveum/midpoint/common/LocalizationServiceImpl.java b/infra/common/src/main/java/com/evolveum/midpoint/common/LocalizationServiceImpl.java index 
b920abf2573..7750419f3fc 100644 --- a/infra/common/src/main/java/com/evolveum/midpoint/common/LocalizationServiceImpl.java +++ b/infra/common/src/main/java/com/evolveum/midpoint/common/LocalizationServiceImpl.java @@ -88,14 +88,11 @@ public String translate(String key, Object[] params, Locale locale, String defau Object[] translated = translateParams(params, locale); for (MessageSource source : sources) { - try { - String value = source.getMessage(key, translated, locale); - if (StringUtils.isNotEmpty(value)) { - LOG.trace("Resolved key {} to value {} using message source {}", key, value, source); - return value; - } - } catch (NoSuchMessageException ex) { - // nothing to do + // Use getMessage with defaultMessage=null to avoid NoSuchMessageException + String value = source.getMessage(key, translated, null, locale); + if (StringUtils.isNotEmpty(value)) { + LOG.trace("Resolved key {} to value {} using message source {}", key, value, source); + return value; } } diff --git a/infra/schema/src/main/java/com/evolveum/midpoint/schema/result/OperationResult.java b/infra/schema/src/main/java/com/evolveum/midpoint/schema/result/OperationResult.java index 90a1a6827f6..e73bd276dce 100644 --- a/infra/schema/src/main/java/com/evolveum/midpoint/schema/result/OperationResult.java +++ b/infra/schema/src/main/java/com/evolveum/midpoint/schema/result/OperationResult.java @@ -2520,9 +2520,9 @@ private void cleanupInternal(Throwable e) { if (subresults == null) { return; } - Iterator iterator = subresults.iterator(); - while (iterator.hasNext()) { - OperationResult subresult = iterator.next(); + + // First pass: check for UNKNOWN status (throws exception if found) + for (OperationResult subresult : subresults) { if (subresult.getStatus() == OperationResultStatus.UNKNOWN) { String message = "Subresult " + subresult.getOperation() + " of operation " + operation + " is still UNKNOWN during cleanup"; LOGGER.error("{}:\n{}", message, this.debugDump(), e); @@ -2532,10 +2532,10 @@ private 
void cleanupInternal(Throwable e) { throw new IllegalStateException(message + "; during handling of exception " + e, e); } } - if (subresult.canCleanup(preserveDuringCleanup)) { - iterator.remove(); - } } + + // Second pass: remove cleanable subresults efficiently using removeIf (O(n) instead of O(n^2)) + subresults.removeIf(subresult -> subresult.canCleanup(preserveDuringCleanup)); } private boolean canCleanup(OperationResultImportanceType preserveDuringCleanup) { diff --git a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/SqaleRepositoryService.java b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/SqaleRepositoryService.java index 41cd5fad781..613591eeb39 100644 --- a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/SqaleRepositoryService.java +++ b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/SqaleRepositoryService.java @@ -1083,6 +1083,13 @@ private SearchResultMetadata executeSearchObjectsIterativ Collection> options, OperationResult operationResult) throws SchemaException, RepositoryException { + // Check if streaming mode is requested via iterationPageSize = -1 + Integer configuredPageSize = getIterationPageSize(options); + if (configuredPageSize != null && configuredPageSize == -1) { + // Use JDBC streaming mode with default fetch size of 1000 + return executeSearchObjectsIterativeStreaming(type, originalQuery, handler, options, operationResult, 1000); + } + try { ObjectPaging originalPaging = originalQuery != null ? originalQuery.getPaging() : null; // this is total requested size of the search @@ -1185,6 +1192,41 @@ private SearchResultMetadata executeSearchObjectsIterativ } } + /** + * Streaming version of searchObjectsIterative that uses JDBC cursor-based streaming. + * Does not use keyset pagination - fetches all matching rows in a single query with cursor. + * This is more efficient for large exports as it avoids multiple SQL round trips. 
+ * + * @param fetchSize JDBC fetch size for streaming (e.g., 1000) + */ + private SearchResultMetadata executeSearchObjectsIterativeStreaming( + Class type, + ObjectQuery query, + ResultHandler handler, + Collection> options, + OperationResult operationResult, + int fetchSize) throws SchemaException, RepositoryException { + + long opHandle = registerOperationStart(OP_SEARCH_OBJECTS_ITERATIVE, type); + try { + SqaleQueryContext, Object> queryContext = + SqaleQueryContext.from(type, sqlRepoContext); + + // Wrap handler to convert ObjectType to PrismObject + ObjectHandler wrappedHandler = (object, opResult) -> { + @SuppressWarnings("unchecked") + PrismObject prismObject = (PrismObject) object.asPrismObject(); + return handler.handle(prismObject, opResult); + }; + + int count = sqlQueryExecutor.listStreaming(queryContext, query, options, wrappedHandler, operationResult, fetchSize); + + return new SearchResultMetadata().approxNumberOfAllResults(count); + } finally { + registerOperationFinish(opHandle); + } + } + /** * Without requested ordering, this is easy: `WHERE oid > lastOid` * @@ -1633,6 +1675,13 @@ private SearchResultMetadata executeSearchContainersIt Collection> options, OperationResult operationResult) throws SchemaException, RepositoryException { + // Check if streaming mode is requested via iterationPageSize = -1 + Integer configuredPageSize = getIterationPageSize(options); + if (configuredPageSize != null && configuredPageSize == -1) { + // Use JDBC streaming mode with default fetch size of 1000 + return executeSearchContainersIterativeStreaming(type, originalQuery, handler, options, operationResult, 1000); + } + try { ObjectPaging originalPaging = originalQuery != null ? 
originalQuery.getPaging() : null; // this is total requested size of the search @@ -1741,6 +1790,73 @@ public Integer getIterationPageSize(Collection SearchResultMetadata searchContainersIterativeStreaming( + @NotNull Class type, + @Nullable ObjectQuery query, + @NotNull ObjectHandler handler, + @Nullable Collection> options, + @NotNull OperationResult parentResult, + int fetchSize) throws SchemaException { + + Validate.notNull(type, "Object type must not be null."); + Validate.notNull(handler, "Result handler must not be null."); + Validate.notNull(parentResult, "Operation result must not be null."); + + OperationResult operationResult = parentResult.subresult(opNamePrefix + OP_SEARCH_CONTAINERS_ITERATIVE) + .addQualifier(type.getSimpleName()) + .addParam(OperationResult.PARAM_TYPE, type.getName()) + .addParam(OperationResult.PARAM_QUERY, query) + .addParam("streaming", true) + .addParam("fetchSize", fetchSize) + .build(); + + try { + logSearchInputParameters(type, query, "Streaming iterative search containers"); + + query = ObjectQueryUtil.simplifyQuery(query); + if (ObjectQueryUtil.isNoneQuery(query)) { + return new SearchResultMetadata().approxNumberOfAllResults(0); + } + + return executeSearchContainersIterativeStreaming(type, query, handler, options, operationResult, fetchSize); + } catch (RepositoryException | RuntimeException e) { + throw handledGeneralException(e, operationResult); + } catch (Throwable t) { + recordFatalError(operationResult, t); + throw t; + } finally { + operationResult.close(); + } + } + + private SearchResultMetadata executeSearchContainersIterativeStreaming( + Class type, + ObjectQuery query, + ObjectHandler handler, + Collection> options, + OperationResult operationResult, + int fetchSize) throws SchemaException, RepositoryException { + + long opHandle = registerOperationStart(OP_SEARCH_CONTAINERS_ITERATIVE, type); + try { + SqaleQueryContext, Object> queryContext = + SqaleQueryContext.from(type, sqlRepoContext, this::readByOid); + + 
int count = sqlQueryExecutor.listStreaming(queryContext, query, options, handler, operationResult, fetchSize); + + return new SearchResultMetadata().approxNumberOfAllResults(count); + } finally { + registerOperationFinish(opHandle); + } + } + /** * So continuation filter for depth 1: should be like: * (orderingValue > $last/orderingValue) diff --git a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/audit/SqaleAuditService.java b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/audit/SqaleAuditService.java index fe29b3fb7ed..3fb3100428d 100644 --- a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/audit/SqaleAuditService.java +++ b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/audit/SqaleAuditService.java @@ -602,6 +602,22 @@ public SearchResultMetadata searchObjectsIterative( } } + /** + * Returns iteration page size from options, or default from configuration. + */ + private Integer getIterationPageSize(Collection> options) { + if (options != null) { + for (var option : options) { + if (option.isRoot() && option.getOptions() != null) { + if (option.getOptions().getIterationPageSize() != null) { + return option.getOptions().getIterationPageSize(); + } + } + } + } + return repositoryConfiguration().getIterativeSearchByPagingBatchSize(); + } + /* TODO: We should try to unify iterative search for repo and audit. 
There are some obvious differences - like the provider of the page results - the differences need to be @@ -618,6 +634,13 @@ private SearchResultMetadata executeSearchObjectsIterative( Collection> options, OperationResult operationResult) throws SchemaException, RepositoryException { + // Check if streaming mode is requested via iterationPageSize = -1 + Integer configuredPageSize = getIterationPageSize(options); + if (configuredPageSize != null && configuredPageSize == -1) { + // Use JDBC streaming mode with default fetch size of 1000 + return executeSearchObjectsIterativeStreaming(originalQuery, handler, options, operationResult, 1000); + } + try { ObjectPaging originalPaging = originalQuery != null ? originalQuery.getPaging() : null; // this is total requested size of the search @@ -704,6 +727,36 @@ private SearchResultMetadata executeSearchObjectsIterative( } } + /** + * Streaming version of searchObjectsIterative that uses JDBC cursor-based streaming. + * Does not use keyset pagination - fetches all matching rows in a single query with cursor. + * This is more efficient for large exports as it avoids multiple SQL round trips. 
+ * + * @param fetchSize JDBC fetch size for streaming (e.g., 1000) + */ + private SearchResultMetadata executeSearchObjectsIterativeStreaming( + ObjectQuery query, + AuditResultHandler handler, + Collection> options, + OperationResult operationResult, + int fetchSize) throws SchemaException, RepositoryException { + + long opHandle = registerOperationStart(OP_SEARCH_OBJECTS_ITERATIVE); + try { + SqaleQueryContext queryContext = + SqaleQueryContext.from(AuditEventRecordType.class, sqlRepoContext); + + int count = sqlQueryExecutor.listStreaming( + queryContext, query, options, + (object, opResult) -> handler.handle(object, opResult), + operationResult, fetchSize); + + return new SearchResultMetadata().approxNumberOfAllResults(count); + } finally { + registerOperationFinish(opHandle); + } + } + /** * Similar to {@link SqaleRepositoryService#lastOidCondition}. * diff --git a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/mapping/ReferenceNameResolver.java b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/mapping/ReferenceNameResolver.java index e19795b236c..56a263f2082 100644 --- a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/mapping/ReferenceNameResolver.java +++ b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/mapping/ReferenceNameResolver.java @@ -8,6 +8,9 @@ import java.util.*; +import com.evolveum.midpoint.repo.sqale.qmodel.focus.QUser; +import com.evolveum.midpoint.repo.sqale.qmodel.role.MAbstractRole; +import com.evolveum.midpoint.repo.sqale.qmodel.role.QAbstractRole; import com.evolveum.midpoint.repo.sqale.qmodel.shadow.QShadow; import com.evolveum.midpoint.repo.sqlbase.SqlBaseOperationTracker; @@ -32,8 +35,29 @@ public abstract class ReferenceNameResolver { + /** + * Holds resolved name and display name for an object. + * Display name is: fullName for Users, displayName for AbstractRoles, null otherwise. 
+ */ + public record ResolvedNames(String name, String displayName) {} + public abstract S resolve(S object, JdbcSession session); + /** + * Batch resolve names and display names for a set of OIDs. + * Results are cached and can be retrieved via {@link #getResolvedNames(UUID)}. + * + * @param session JDBC session + * @param oids OIDs to resolve + * @param includeDisplayNames if true, also resolve display names (requires LEFT JOIN) + */ + public abstract void batchResolve(JdbcSession session, Set oids, boolean includeDisplayNames); + + /** + * Get cached resolved names for an OID, or null if not resolved. + */ + public abstract ResolvedNames getResolvedNames(UUID oid); + public static ReferenceNameResolver from(Collection> options) { @NotNull List paths = getPathsToResolve(options); @@ -71,15 +95,27 @@ private static final class Noop extends ReferenceNameResolver { public S resolve(S object, JdbcSession session) { return object; } + + @Override + public void batchResolve(JdbcSession session, Set oids, boolean includeDisplayNames) { + // No-op + } + + @Override + public ResolvedNames getResolvedNames(UUID oid) { + return null; + } } private static final class Impl extends ReferenceNameResolver { private final List paths; - private final Map uuidToName = new HashMap<>(); private final Set oidsToResolve = new HashSet<>(); private final Set shadowOidsToResolve = new HashSet<>(); + // Unified cache for name and displayName + private final Map resolvedNamesCache = new HashMap<>(); + public Impl(List paths) { super(); @@ -120,30 +156,31 @@ public S resolve(S object, JdbcSession session) { } private void resolveNames(JdbcSession session) { - - // TODO: Add batch processing var object = new QObject<>(MObject.class, "obj"); var shadow = new QShadow("s"); - resolveNames(session, object, oidsToResolve); - resolveNames(session, shadow, shadowOidsToResolve); - + resolveNamesSimple(session, object, oidsToResolve); + resolveNamesSimple(session, shadow, shadowOidsToResolve); } - 
private void resolveNames(JdbcSession session, QObject object, Set oidsToResolve) { + + /** + * Simple name resolution (name only, no displayName). + * Used by resolve() for references not pre-loaded by batchResolve(). + */ + private void resolveNamesSimple(JdbcSession session, QObject object, Set oidsToResolve) { if (oidsToResolve.isEmpty()) { return; } List namesResult = session.newQuery() .from(object) - .select(object.oid, object.nameOrig, object.nameNorm) + .select(object.oid, object.nameOrig) .where(object.oid.in(oidsToResolve)) .fetch(); for (Tuple named : namesResult) { UUID uuid = named.get(object.oid); oidsToResolve.remove(uuid); - String orig = named.get(object.nameOrig); - String norm = named.get(object.nameNorm); - PolyString poly = new PolyString(orig, norm); - uuidToName.put(uuid, poly); + String name = named.get(object.nameOrig); + // Only set name, displayName is null (not requested) + resolvedNamesCache.put(uuid, new ResolvedNames(name, null)); } } @@ -162,10 +199,10 @@ private void initialVisit(PrismReferenceValue value) { return; } UUID oid = SqaleUtils.oidToUuid(value.getOid()); - PolyString maybe = uuidToName.get(oid); - if (maybe != null) { - value.setTargetName(maybe); - } else if (QNameUtil.match(ShadowType.COMPLEX_TYPE,value.getTargetType())) { + ResolvedNames resolved = resolvedNamesCache.get(oid); + if (resolved != null) { + value.setTargetName(PolyString.fromOrig(resolved.name())); + } else if (QNameUtil.match(ShadowType.COMPLEX_TYPE, value.getTargetType())) { shadowOidsToResolve.add(oid); } else { oidsToResolve.add(oid); @@ -176,10 +213,77 @@ private void updateReference(PrismReferenceValue value) { if (value.getOid() == null) { return; } - PolyString name = uuidToName.get(SqaleUtils.oidToUuid(value.getOid())); - if (name != null) { - value.setTargetName(name); + ResolvedNames resolved = resolvedNamesCache.get(SqaleUtils.oidToUuid(value.getOid())); + if (resolved != null) { + value.setTargetName(PolyString.fromOrig(resolved.name())); + 
} + } + + @Override + public void batchResolve(JdbcSession session, Set oids, boolean includeDisplayNames) { + if (oids == null || oids.isEmpty()) { + return; } + + // Filter out OIDs that are already cached + Set oidsToLoad = new HashSet<>(); + for (UUID oid : oids) { + if (!resolvedNamesCache.containsKey(oid)) { + oidsToLoad.add(oid); + } + } + + if (oidsToLoad.isEmpty()) { + return; + } + + if (includeDisplayNames) { + // Query m_object with LEFT JOIN to m_user and m_abstract_role for display names + QObject qObj = new QObject<>(MObject.class, "obj"); + QUser qUser = new QUser("usr"); + QAbstractRole qRole = new QAbstractRole<>(MAbstractRole.class, "ar"); + + List rows = session.newQuery() + .select(qObj.oid, qObj.nameOrig, qUser.fullNameOrig, qRole.displayNameOrig) + .from(qObj) + .leftJoin(qUser).on(qObj.oid.eq(qUser.oid)) + .leftJoin(qRole).on(qObj.oid.eq(qRole.oid)) + .where(qObj.oid.in(oidsToLoad)) + .fetch(); + + for (Tuple row : rows) { + UUID oid = row.get(qObj.oid); + String fullName = row.get(qUser.fullNameOrig); + String roleDisplayName = row.get(qRole.displayNameOrig); + String name = row.get(qObj.nameOrig); + + // fullName for Users, displayName for AbstractRoles, null otherwise + // GUI components decide whether to fallback to name + String displayName = fullName != null ? 
fullName : roleDisplayName; + + resolvedNamesCache.put(oid, new ResolvedNames(name, displayName)); + } + } else { + // Simple name-only query + QObject qObj = new QObject<>(MObject.class, "obj"); + + List rows = session.newQuery() + .select(qObj.oid, qObj.nameOrig) + .from(qObj) + .where(qObj.oid.in(oidsToLoad)) + .fetch(); + + for (Tuple row : rows) { + UUID oid = row.get(qObj.oid); + String name = row.get(qObj.nameOrig); + resolvedNamesCache.put(oid, new ResolvedNames(name, null)); + } + } + } + + @Override + public ResolvedNames getResolvedNames(UUID oid) { + return resolvedNamesCache.get(oid); } } } diff --git a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/qmodel/accesscert/QAccessCertificationWorkItemMapping.java b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/qmodel/accesscert/QAccessCertificationWorkItemMapping.java index b07d3712800..d8c61af3463 100644 --- a/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/qmodel/accesscert/QAccessCertificationWorkItemMapping.java +++ b/repo/repo-sqale/src/main/java/com/evolveum/midpoint/repo/sqale/qmodel/accesscert/QAccessCertificationWorkItemMapping.java @@ -8,11 +8,16 @@ import static com.evolveum.midpoint.util.MiscUtil.asXMLGregorianCalendar; import static com.evolveum.midpoint.xml.ns._public.common.common_3.AccessCertificationWorkItemType.*; +import static com.querydsl.core.types.dsl.Expressions.stringTemplate; +import java.nio.charset.StandardCharsets; import java.util.*; +import java.util.stream.Collectors; +import javax.xml.namespace.QName; import com.evolveum.midpoint.schema.GetOperationOptions; import com.evolveum.midpoint.schema.SelectorOptions; +import com.evolveum.prism.xml.ns._public.types_3.PolyStringType; import com.querydsl.core.Tuple; import org.jetbrains.annotations.NotNull; @@ -22,6 +27,7 @@ import com.evolveum.midpoint.prism.PrismObject; import com.evolveum.midpoint.repo.sqale.SqaleQueryContext; import com.evolveum.midpoint.repo.sqale.SqaleRepoContext; 
+import com.evolveum.midpoint.repo.sqale.mapping.ReferenceNameResolver; import com.evolveum.midpoint.repo.sqale.qmodel.common.QContainerMapping; import com.evolveum.midpoint.repo.sqale.qmodel.focus.QUserMapping; import com.evolveum.midpoint.repo.sqlbase.JdbcSession; @@ -35,6 +41,14 @@ import com.evolveum.midpoint.xml.ns._public.common.common_3.AccessCertificationCampaignType; import com.evolveum.midpoint.xml.ns._public.common.common_3.AccessCertificationCaseType; import com.evolveum.midpoint.xml.ns._public.common.common_3.AccessCertificationWorkItemType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.ActivationType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.ArchetypeType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectReferenceType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.OrgType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.RoleType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.ServiceType; +import com.evolveum.midpoint.xml.ns._public.common.common_3.UserType; /** * Mapping between {@link QAccessCertificationWorkItem} and {@link AccessCertificationWorkItemType}. @@ -169,67 +183,492 @@ public MAccessCertificationWorkItem insert( public ResultListRowTransformer createRowTransformer( SqlQueryContext sqlQueryContext, JdbcSession jdbcSession, Collection> options) { - Map> cache = new HashMap<>(); - return (tuple, entityPath) -> { + return new WorkItemRowTransformer(sqlQueryContext, jdbcSession, options); + } + + /** + * Row transformer for AccessCertificationWorkItem that batch-loads Campaign and Case data + * in beforeTransformation to avoid N+1 query problem. + * + *
+     * <p>
Uses "clone-based isolation" strategy where each WorkItem is returned with its own + * isolated Campaign > Case > WorkItem hierarchy. This is required for Model layer's + * security processing and GUI column access patterns. + * + * @see #transform(Tuple, QAccessCertificationWorkItem) for why clone is required + */ + private class WorkItemRowTransformer implements + ResultListRowTransformer { + + private final SqlQueryContext sqlQueryContext; + private final JdbcSession jdbcSession; + + // Per-batch caches (cleared in beforeTransformation) + private Map> campaignCache = new HashMap<>(); + private Map caseCache = new HashMap<>(); // key: "ownerOid_caseCid" + private Map> assigneeRefCache = new HashMap<>(); // key: "ownerOid_workItemCid" + private Map> candidateRefCache = new HashMap<>(); + + // Clone optimization: reuse isolated Campaign/Case for consecutive WorkItems from same Case + private String lastCaseKey = null; + private PrismObject lastIsolatedCampaign = null; + private AccessCertificationCaseType lastIsolatedCase = null; + + // Cross-batch cache for resolved reference names + private final ReferenceNameResolver sharedResolver; + + WorkItemRowTransformer( + SqlQueryContext sqlQueryContext, + JdbcSession jdbcSession, + Collection> options) { + this.sqlQueryContext = sqlQueryContext; + this.jdbcSession = jdbcSession; + this.sharedResolver = ReferenceNameResolver.from(options); + } + + @Override + public void beforeTransformation(List tuples, QAccessCertificationWorkItem entityPath) throws SchemaException { + // Clear per-batch caches (but preserve sharedResolver across batches) + campaignCache.clear(); + caseCache.clear(); + assigneeRefCache.clear(); + candidateRefCache.clear(); + lastCaseKey = null; + lastIsolatedCampaign = null; + lastIsolatedCase = null; + + // Collect unique Campaign OIDs, Case CIDs, WorkItem CIDs, and WorkItem keys from this batch + Set campaignOids = new HashSet<>(); + Map> ownerToCaseCids = new HashMap<>(); + Set workItemCids = new 
HashSet<>(); + Set workItemKeys = new HashSet<>(); + + for (Tuple tuple : tuples) { + MAccessCertificationWorkItem row = Objects.requireNonNull(tuple.get(entityPath)); + campaignOids.add(row.ownerOid); + ownerToCaseCids.computeIfAbsent(row.ownerOid, k -> new HashSet<>()).add(row.accessCertCaseCid); + workItemCids.add(row.cid); + workItemKeys.add(row.ownerOid + "_" + row.cid); + } + + // Batch load Campaigns + loadCampaignsBatch(campaignOids); + + // Batch load Cases with filtered WorkItems from fullObject + // This provides full WorkItem attributes (output/comment, deadline, etc.) while filtering out + // unnecessary WorkItems (those not in this batch) + loadCasesBatchWithFilteredWorkItems(ownerToCaseCids, workItemCids); + + // Batch load WorkItem references (assigneeRef, candidateRef) from reference tables + // (performerRef is in WorkItem row itself, no separate load needed) + loadWorkItemRefsBatch(campaignOids, workItemCids, workItemKeys); + + // Batch resolve names and display names for all references + // (sharedResolver caches across batches, so redundant OIDs are automatically skipped) + Set refOidsToLoad = new HashSet<>(); + + // Collect OIDs from Case references (objectRef, targetRef, tenantRef, orgRef) + for (AccessCertificationCaseType caseType : caseCache.values()) { + collectRefOid(caseType.getObjectRef(), refOidsToLoad); + collectRefOid(caseType.getTargetRef(), refOidsToLoad); + collectRefOid(caseType.getTenantRef(), refOidsToLoad); + collectRefOid(caseType.getOrgRef(), refOidsToLoad); + } + + // Collect OIDs from WorkItem references (assigneeRef, candidateRef, performerRef) + for (List refs : assigneeRefCache.values()) { + for (ObjectReferenceType ref : refs) { + collectRefOid(ref, refOidsToLoad); + } + } + for (List refs : candidateRefCache.values()) { + for (ObjectReferenceType ref : refs) { + collectRefOid(ref, refOidsToLoad); + } + } + // Collect performerRef OIDs from WorkItem rows + for (Tuple tuple : tuples) { + MAccessCertificationWorkItem 
row = tuple.get(entityPath); + if (row != null && row.performerRefTargetOid != null) { + refOidsToLoad.add(row.performerRefTargetOid); + } + } + + // Batch resolve with display names + sharedResolver.batchResolve(jdbcSession, refOidsToLoad, true); + + // Apply resolved names to Case references (will be inherited by clone) + for (AccessCertificationCaseType caseType : caseCache.values()) { + setDisplayNameOnRef(caseType.getObjectRef()); + setDisplayNameOnRef(caseType.getTargetRef()); + setDisplayNameOnRef(caseType.getTenantRef()); + setDisplayNameOnRef(caseType.getOrgRef()); + } + + // Apply resolved names to WorkItem references + for (List refs : assigneeRefCache.values()) { + for (ObjectReferenceType ref : refs) { + setDisplayNameOnRef(ref); + } + } + for (List refs : candidateRefCache.values()) { + for (ObjectReferenceType ref : refs) { + setDisplayNameOnRef(ref); + } + } + } + + private void collectRefOid(ObjectReferenceType ref, Set oids) { + if (ref != null && ref.getOid() != null) { + oids.add(UUID.fromString(ref.getOid())); + } + } + + /** + * Set a lightweight object with display name on the reference for GUI column use. + * Uses sharedResolver's cached name and display name. 
+ */ + private void setDisplayNameOnRef(ObjectReferenceType ref) { + if (ref == null || ref.getOid() == null) { + return; + } + UUID oid = UUID.fromString(ref.getOid()); + ReferenceNameResolver.ResolvedNames resolved = sharedResolver.getResolvedNames(oid); + if (resolved == null) { + return; + } + + // Set targetName (name attribute) for ObjectReferenceColumn + if (resolved.name() != null) { + ref.setTargetName(PolyStringType.fromOrig(resolved.name())); + } + + // Set object with name and displayName/fullName for GUI columns + if (resolved.name() != null || resolved.displayName() != null) { + ObjectType obj = createMinimalObjectForDisplay(ref.getType(), resolved.name(), resolved.displayName()); + if (obj != null) { + obj.setOid(ref.getOid()); + ref.asReferenceValue().setObject(obj.asPrismObject()); + } + } + } + + /** + * Create minimal ObjectType subclass with name and display name set. + * Name is always set to prevent NPE. DisplayName/fullName is set based on type. + */ + private ObjectType createMinimalObjectForDisplay(QName type, String name, String displayName) { + if (type == null) { + return null; + } + ObjectType obj; + if (UserType.COMPLEX_TYPE.equals(type)) { + UserType user = new UserType(); + if (displayName != null) { + user.setFullName(PolyStringType.fromOrig(displayName)); + } + obj = user; + } else if (RoleType.COMPLEX_TYPE.equals(type) + || ServiceType.COMPLEX_TYPE.equals(type) + || ArchetypeType.COMPLEX_TYPE.equals(type) + || OrgType.COMPLEX_TYPE.equals(type)) { + // For AbstractRole types, set displayName + RoleType role = new RoleType(); + if (displayName != null) { + role.setDisplayName(PolyStringType.fromOrig(displayName)); + } + obj = role; + } else { + obj = new ObjectType() {}; + } + // Always set name to prevent NPE when getName() is called + if (name != null) { + obj.setName(PolyStringType.fromOrig(name)); + } + return obj; + } + + private void loadCampaignsBatch(Set campaignOids) { + if (campaignOids.isEmpty()) { + return; + } + 
QAccessCertificationCampaignMapping mapping = + QAccessCertificationCampaignMapping.getAccessCertificationCampaignMapping(); + QAccessCertificationCampaign qCampaign = mapping.defaultAlias(); + + List rows = jdbcSession.newQuery() + .select(qCampaign.oid, qCampaign.fullObject) + .from(qCampaign) + .where(qCampaign.oid.in(campaignOids)) + .fetch(); + + for (Tuple row : rows) { + UUID oid = row.get(qCampaign.oid); + byte[] fullObject = row.get(qCampaign.fullObject); + try { + AccessCertificationCampaignType campaign = mapping.parseSchemaObject(fullObject, oid.toString()); + campaignCache.put(oid, campaign.asPrismObject()); + } catch (SchemaException e) { + throw new SystemException("Failed to parse campaign " + oid, e); + } + } + } + + /** + * Load Cases with filtered WorkItems from fullObject. + * This method fetches Case fullObject and filters the embedded WorkItem array using + * PostgreSQL JSONB functions to include only WorkItems in the current batch. + * This provides full WorkItem attributes (output/comment, deadline, name, etc.) that + * are only available in fullObject, while avoiding loading unnecessary WorkItems. + * + *
+     * <p>
Note: Currently fullObject is stored as bytea, requiring convert_from(fullObject, 'UTF8')::jsonb + * conversion in SQL. If fullObject column is changed to JSONB type in the future, this SQL + * can be simplified and optimized by removing the convert_from/cast overhead: + *

+         * -- Current (bytea):  (convert_from(fullObject, 'UTF8')::jsonb)->'case'
+         * -- Future (jsonb):   fullObject->'case'
+         * 
+ */ + private void loadCasesBatchWithFilteredWorkItems( + Map> ownerToCaseCids, + Set workItemCids) { + if (ownerToCaseCids.isEmpty()) { + return; + } + + QAccessCertificationCaseMapping caseMapping = + QAccessCertificationCaseMapping.getAccessCertificationCaseMapping(); + QAccessCertificationCase qCase = caseMapping.defaultAlias(); + + // Build WorkItem CID list as SQL literal for JSONB filtering + // These are Long values so safe to embed directly (no SQL injection risk) + String workItemCidsLiteral = workItemCids.stream() + .map(String::valueOf) + .collect(Collectors.joining(", ")); + + // SQL expression to filter WorkItems in the JSONB fullObject: + // 1. Convert fullObject from bytea to text, then to jsonb, and extract the 'case' object + // 2. Filter the 'workItem' array to keep only WorkItems with @id in our batch + // 3. Reconstruct the JSON with filtered workItem array + // Note: convert_from({0}, 'UTF8') converts bytea to text, then ::jsonb parses as JSON + // Note: 'case' is a PostgreSQL reserved word, so we use ->'case' with single quotes + // which requires careful escaping in stringTemplate + String sql = "jsonb_set(" + + "(convert_from({0}, 'UTF8')::jsonb)->'case', " + + "'{workItem}', " + + "(SELECT COALESCE(jsonb_agg(wi), '[]'::jsonb) " + + "FROM jsonb_array_elements((convert_from({0}, 'UTF8')::jsonb)->'case'->'workItem') as wi " + + "WHERE (wi->>'@id')::bigint IN (" + workItemCidsLiteral + "))" + + ")"; + var filteredCaseJson = stringTemplate(sql, qCase.fullObject); + + // Build query with precise (ownerOid, cid) filtering + var query = jdbcSession.newQuery() + .from(qCase) + .select(qCase.ownerOid, qCase.cid, filteredCaseJson); + + if (ownerToCaseCids.size() == 1) { + // Single campaign - simple query + Map.Entry> entry = ownerToCaseCids.entrySet().iterator().next(); + query.where(qCase.ownerOid.eq(entry.getKey()).and(qCase.cid.in(entry.getValue()))); + } else { + // Multiple campaigns - build OR conditions + var predicate = 
qCase.ownerOid.isNull(); // Start with false-like predicate + for (Map.Entry> entry : ownerToCaseCids.entrySet()) { + predicate = predicate.or( + qCase.ownerOid.eq(entry.getKey()).and(qCase.cid.in(entry.getValue()))); + } + query.where(predicate); + } + + List rows = query.fetch(); + + for (Tuple row : rows) { + UUID ownerOid = row.get(qCase.ownerOid); + Long cid = row.get(qCase.cid); + String filteredJson = row.get(2, String.class); + String caseKey = ownerOid + "_" + cid; + + try { + // Parse the filtered Case JSON (includes WorkItems with all attributes) + AccessCertificationCaseType caseObj = parseFilteredCaseJson(filteredJson, ownerOid, cid); + caseCache.put(caseKey, caseObj); + } catch (SchemaException e) { + throw new SystemException("Failed to parse filtered case " + caseKey, e); + } + } + } + + /** + * Parse filtered Case JSON back to AccessCertificationCaseType. + * The JSON needs to be wrapped in {"case": ...} format for parseSchemaObject(). + */ + private AccessCertificationCaseType parseFilteredCaseJson( + String filteredJson, UUID ownerOid, Long cid) throws SchemaException { + // Wrap JSON in {"case": ...} format expected by parseSchemaObject + String wrappedJson = "{\"case\":" + filteredJson + "}"; + byte[] bytes = wrappedJson.getBytes(StandardCharsets.UTF_8); + + QAccessCertificationCaseMapping caseMapping = + QAccessCertificationCaseMapping.getAccessCertificationCaseMapping(); + return caseMapping.parseSchemaObject(bytes, ownerOid + "," + cid); + } + + /** + * Batch load WorkItem references (assigneeRef, candidateRef) from reference tables. + * performerRef is in WorkItem row itself, no separate load needed. 
+ */ + private void loadWorkItemRefsBatch(Set ownerOids, Set workItemCids, Set workItemKeys) { + if (ownerOids.isEmpty() || workItemCids.isEmpty()) { + return; + } + + // Load assigneeRef - filter by both ownerOid AND workItemCid to avoid loading all refs + QAccessCertificationWorkItemReference qAssignee = + QAccessCertificationWorkItemReferenceMapping.getForCaseWorkItemAssignee().defaultAlias(); + List assigneeRows = jdbcSession.newQuery() + .from(qAssignee) + .select(qAssignee) + .where(qAssignee.ownerOid.in(ownerOids) + .and(qAssignee.accessCertWorkItemCid.in(workItemCids))) + .fetch(); + + for (MAccessCertificationWorkItemReference ref : assigneeRows) { + String key = ref.ownerOid + "_" + ref.accessCertWorkItemCid; + if (workItemKeys.contains(key)) { + assigneeRefCache.computeIfAbsent(key, k -> new ArrayList<>()) + .add(objectReference(ref.targetOid, ref.targetType, ref.relationId)); + } + } + + // Load candidateRef - filter by both ownerOid AND workItemCid to avoid loading all refs + QAccessCertificationWorkItemReference qCandidate = + QAccessCertificationWorkItemReferenceMapping.getForCaseWorkItemCandidate().defaultAlias(); + List candidateRows = jdbcSession.newQuery() + .from(qCandidate) + .select(qCandidate) + .where(qCandidate.ownerOid.in(ownerOids) + .and(qCandidate.accessCertWorkItemCid.in(workItemCids))) + .fetch(); + + for (MAccessCertificationWorkItemReference ref : candidateRows) { + String key = ref.ownerOid + "_" + ref.accessCertWorkItemCid; + if (workItemKeys.contains(key)) { + candidateRefCache.computeIfAbsent(key, k -> new ArrayList<>()) + .add(objectReference(ref.targetOid, ref.targetType, ref.relationId)); + } + } + } + + /** + * Transform a database row into AccessCertificationWorkItemType with proper Prism hierarchy. + * + *
+     * <p>
+     * <b>Why Clone is Required:</b>
+     * Each WorkItem must have an isolated Campaign > Case > WorkItem hierarchy because:
+     * <ol>
+     * <li>Model layer's security processing operates on root objects (Campaign) and may
+     * prune child elements. Shared hierarchy would cause unintended side effects.</li>
+     * <li>GUI columns traverse Prism hierarchy to find parent Case, expecting isolated structure.</li>
+     * </ol>
+     *

Alternative Considered: + * A shared hierarchy approach was tested but proved ~3.5x slower due to Model layer's + * security processing overhead on large shared objects. + */ + @Override + public AccessCertificationWorkItemType transform(Tuple tuple, QAccessCertificationWorkItem entityPath) { MAccessCertificationWorkItem row = Objects.requireNonNull(tuple.get(entityPath)); UUID ownerOid = row.ownerOid; - PrismObject owner = cache.get(ownerOid); - // FIXME: Should we load cases we need, instead of all cases? - if (owner == null) { - owner = ((SqaleQueryContext) sqlQueryContext).loadObject( - jdbcSession, AccessCertificationCampaignType.class, ownerOid, Collections.emptyList()); - cache.put(ownerOid, owner); - } - PrismContainer caseContainer; + String caseKey = ownerOid + "_" + row.accessCertCaseCid; + + // Reuse isolated Campaign/Case if same as last row (consecutive WorkItems from same Case) + // This optimization skips Campaign/Case clone when WorkItems are ordered by Case + AccessCertificationWorkItemType workItem; + if (caseKey.equals(lastCaseKey)) { + // Same Case - reuse isolated structures, just clone and add the WorkItem + workItem = findAndCloneWorkItem(caseKey, row.cid); + } else { + // Different Case - create new isolated Campaign/Case structures + lastCaseKey = caseKey; + lastIsolatedCampaign = cloneCampaign(ownerOid); + lastIsolatedCase = cloneCase(caseKey, lastIsolatedCampaign); + workItem = findAndCloneWorkItem(caseKey, row.cid); + } + + // Add WorkItem to isolated Case (WorkItem is cloned, so safe to add) + // This establishes the Prism hierarchy: Campaign > Case > WorkItem try { - caseContainer = owner.findOrCreateContainer(AccessCertificationCampaignType.F_CASE); + lastIsolatedCase.asPrismContainerValue() + .findOrCreateContainer(AccessCertificationCaseType.F_WORK_ITEM) + .add(workItem.asPrismContainerValue()); } catch (SchemaException e) { - throw new SystemException("Should not happened", e); + throw new SystemException("Failed to add work item 
to case", e); + } + + // Set targetName on performerRef + setDisplayNameOnRef(workItem.getPerformerRef()); + + // Replace assigneeRef and candidateRef with resolved ones from reference cache + // (fullObject contains references but without targetName/displayName resolved) + String refKey = ownerOid + "_" + row.cid; + List assigneeRefs = assigneeRefCache.get(refKey); + List candidateRefs = candidateRefCache.get(refKey); + + workItem.getAssigneeRef().clear(); + workItem.getCandidateRef().clear(); + if (assigneeRefs != null) { + workItem.getAssigneeRef().addAll(assigneeRefs); + } + if (candidateRefs != null) { + workItem.getCandidateRef().addAll(candidateRefs); + } + + attachContainerIdPath(workItem, tuple, entityPath); + return workItem; + } + + /** Clone Campaign from cache to create an isolated hierarchy for this WorkItem. */ + private PrismObject cloneCampaign(UUID ownerOid) { + PrismObject cached = campaignCache.get(ownerOid); + if (cached == null) { + return null; + } + return cached.clone(); + } + + /** Clone Case from cache and attach it to the isolated Campaign. WorkItems are cleared. 
*/ + private AccessCertificationCaseType cloneCase(String caseKey, PrismObject isolatedCampaign) { + AccessCertificationCaseType cached = caseCache.get(caseKey); + if (cached == null) { + return null; } - PrismContainerValue aCase = caseContainer.findValue(row.accessCertCaseCid); - if (aCase == null) { - aCase = loadCase(jdbcSession, ownerOid, row.accessCertCaseCid); + + AccessCertificationCaseType cloned = cached.clone(); + cloned.getWorkItem().clear(); + + if (isolatedCampaign != null) { try { - caseContainer.addIgnoringEquivalents(aCase); + PrismContainer caseContainer = + isolatedCampaign.findOrCreateContainer(AccessCertificationCampaignType.F_CASE); + caseContainer.add(cloned.asPrismContainerValue()); } catch (SchemaException e) { - throw new SystemException(e); + throw new SystemException("Failed to attach case to campaign: " + caseKey, e); } } - resolveReferenceNames(aCase.asContainerable(), jdbcSession, options); + return cloned; + } - PrismContainer container = - aCase.findContainer(AccessCertificationCaseType.F_WORK_ITEM); - if (container == null) { - throw new SystemException("Campaign " + owner + "has no work item for case with ID " + row.accessCertCaseCid); + /** Find and clone a specific WorkItem from the cached Case (which has full attributes from fullObject). 
*/ + private AccessCertificationWorkItemType findAndCloneWorkItem(String caseKey, Long workItemCid) { + AccessCertificationCaseType cached = caseCache.get(caseKey); + if (cached == null) { + throw new SystemException("Case not found in cache: " + caseKey); } - PrismContainerValue value = container.findValue(row.cid); - if (value == null) { - throw new SystemException("Campaign " + owner + "has no work item with ID " + row.cid); + for (AccessCertificationWorkItemType wi : cached.getWorkItem()) { + if (wi.getId().equals(workItemCid)) { + return wi.clone(); + } } - @NotNull AccessCertificationWorkItemType ret = value.asContainerable(); - attachContainerIdPath(ret, tuple, entityPath); - resolveReferenceNames(ret, jdbcSession, options); - return ret; - }; - } - - private PrismContainerValue loadCase( - JdbcSession jdbcSession, UUID ownerOid, Long accessCertCaseCid) { - QAccessCertificationCaseMapping mapping = QAccessCertificationCaseMapping.getAccessCertificationCaseMapping(); - QAccessCertificationCase root = mapping.defaultAlias(); - Tuple result = jdbcSession.newQuery() - .from(root) - .select(mapping.selectExpressions(root, Collections.emptyList())) - .where(root.ownerOid.eq(ownerOid).and(root.cid.eq(accessCertCaseCid))) - .fetchOne(); - if (result == null) { - throw new SystemException("Case owner:" + ownerOid + " cid:" + accessCertCaseCid + " does not exist."); - } - try { - //noinspection unchecked - return mapping.toSchemaObject(result, root, jdbcSession, Collections.emptyList()).asPrismContainerValue(); - } catch (SchemaException e) { - throw new SystemException(e); + throw new SystemException("WorkItem not found in cached case: " + caseKey + ", workItemCid: " + workItemCid); } } diff --git a/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryContext.java b/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryContext.java index 7c003f320b4..703dbed8bac 100644 --- 
a/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryContext.java +++ b/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryContext.java @@ -6,9 +6,12 @@ package com.evolveum.midpoint.repo.sqlbase; +import java.sql.Connection; +import java.sql.SQLException; import java.util.*; import java.util.function.BiFunction; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.xml.namespace.QName; import com.querydsl.core.QueryMetadata; @@ -458,6 +461,40 @@ public int executeCount(JdbcSession jdbcSession) { .fetchCount(); } + /** + * Executes query with JDBC streaming (cursor-based fetch). + * Unlike {@link #executeQuery}, this does NOT load all rows into memory. + * PostgreSQL requires autoCommit=false and fetchSize>0 for streaming. + * + * @param jdbcSession JDBC session (must be in transaction, i.e., autoCommit=false) + * @param fetchSize number of rows to fetch at a time from the database + * @return Stream of Tuple that must be consumed within the same transaction + */ + public Stream executeQueryStreaming(JdbcSession jdbcSession, int fetchSize) throws QueryException { + Connection connection = jdbcSession.connection(); + + // PostgreSQL requires autoCommit=false for cursor-based streaming + try { + if (connection.getAutoCommit()) { + throw new QueryException("Streaming query requires autoCommit=false (transaction mode)"); + } + } catch (SQLException e) { + throw new QueryException("Failed to check autoCommit status", e); + } + + SQLQuery query = sqlQuery.clone(connection); + + // Set fetch size for JDBC cursor-based streaming + query.setStatementOptions(com.querydsl.sql.StatementOptions.builder() + .setFetchSize(fetchSize) + .build()); + + Q entity = root(); + return query + .select(buildSelectExpressions(entity, query)) + .stream(); + } + /** * Adds new LEFT JOIN to the query and returns {@link SqlQueryContext} for this join path. 
* The returned context still uses the same SQL query; any further filter processing will diff --git a/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryExecutor.java b/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryExecutor.java index 4cd541aa63b..6175b1eeaf0 100644 --- a/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryExecutor.java +++ b/repo/repo-sqlbase/src/main/java/com/evolveum/midpoint/repo/sqlbase/SqlQueryExecutor.java @@ -6,17 +6,25 @@ package com.evolveum.midpoint.repo.sqlbase; +import java.util.ArrayList; import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Stream; import com.querydsl.core.Tuple; import org.jetbrains.annotations.NotNull; import com.evolveum.midpoint.prism.query.ObjectQuery; +import com.evolveum.midpoint.repo.sqlbase.mapping.ResultListRowTransformer; import com.evolveum.midpoint.repo.sqlbase.querydsl.FlexibleRelationalPathBase; import com.evolveum.midpoint.schema.GetOperationOptions; +import com.evolveum.midpoint.schema.ObjectHandler; import com.evolveum.midpoint.schema.SearchResultList; import com.evolveum.midpoint.schema.SearchResultMetadata; import com.evolveum.midpoint.schema.SelectorOptions; +import com.evolveum.midpoint.schema.result.OperationResult; import com.evolveum.midpoint.util.exception.SchemaException; /** @@ -65,7 +73,6 @@ public , R> int count( context.beforeQuery(); PageOf result; try (JdbcSession jdbcSession = sqlRepoContext.newJdbcSession().startReadOnlyTransaction()) { - var opResult = SqlBaseOperationTracker.fetchMultiplePrimaries(); try (var ignored = SqlBaseOperationTracker.fetchMultiplePrimaries()){ result = context.executeQuery(jdbcSession); } @@ -82,4 +89,123 @@ private SearchResultList createSearchResultList(PageOf result) { } return new SearchResultList<>(result.content(), metadata); } + + /** + * Batch size for mini-batch processing during row 
transformation. + * This allows beforeTransformation to fetch child data (assignments, references) + * in batches instead of one-by-one, avoiding N+1 query problem. + * + * Used in two scenarios: + * - Streaming queries: rows are processed in mini-batches of this size + * - Non-streaming list queries (GUI page display): beforeTransformation receives + * the entire page at once (typically <= 100 rows) + * + * This value should be aligned with the maximum page size in GUI (currently 100). + * If GUI allows page sizes larger than this value in the future + * (e.g., via objectCollection configuration), this value should be adjusted accordingly + * or made configurable. + */ + private static final int TRANSFORM_BATCH_SIZE = 100; + + /** + * Streaming iterative search that processes rows one by one without loading all into memory. + * Uses JDBC cursor-based streaming with configurable fetch size. + * + * @param context query context + * @param query object query (may be null) + * @param options get operation options + * @param handler handler called for each transformed result + * @param operationResult operation result for handler + * @param fetchSize JDBC fetch size for streaming + * @return number of processed items + */ + public , R> int listStreaming( + @NotNull SqlQueryContext context, + ObjectQuery query, + Collection> options, + @NotNull ObjectHandler handler, + @NotNull OperationResult operationResult, + int fetchSize) + throws RepositoryException, SchemaException { + + if (query != null) { + context.processFilter(query.getFilter()); + context.processObjectPaging(query.getPaging()); + } + context.processOptions(options); + context.beforeQuery(); + + int count = 0; + // PostgreSQL streaming requires autoCommit=false, use read-only transaction for optimization + try (JdbcSession jdbcSession = sqlRepoContext.newJdbcSession().startReadOnlyTransaction()) { + try (Stream stream = context.executeQueryStreaming(jdbcSession, fetchSize)) { + Q entityPath = context.path(); 
+ Iterator iterator = stream.iterator(); + + // Process in mini-batches to allow beforeTransformation to fetch child data + List batch = new ArrayList<>(TRANSFORM_BATCH_SIZE); + + while (iterator.hasNext()) { + batch.add(iterator.next()); + + if (batch.size() >= TRANSFORM_BATCH_SIZE) { + count += processBatch(context, jdbcSession, options, entityPath, batch, handler, operationResult); + if (count < 0) { + // Handler returned false, stop processing + count = -count; + break; + } + batch.clear(); + } + } + + // Process remaining items in the last batch + if (!batch.isEmpty()) { + int lastBatchCount = processBatch(context, jdbcSession, options, entityPath, batch, handler, operationResult); + if (lastBatchCount < 0) { + count += -lastBatchCount; + } else { + count += lastBatchCount; + } + } + } + + jdbcSession.commit(); + return count; + } + } + + /** + * Process a batch of tuples: call beforeTransformation, then transform and handle each. + * Returns positive count if all processed, negative count if handler returned false. + */ + private , R> int processBatch( + SqlQueryContext context, + JdbcSession jdbcSession, + Collection> options, + Q entityPath, + List batch, + ObjectHandler handler, + OperationResult operationResult) throws SchemaException { + + ResultListRowTransformer rowTransformer = + context.mapping().createRowTransformer(context, jdbcSession, options); + + // Call beforeTransformation with the batch - this fetches child data (assignments, etc.) + rowTransformer.beforeTransformation(batch, entityPath); + + int count = 0; + for (Tuple tuple : batch) { + S transformed = rowTransformer.transform(tuple, entityPath); + + if (!handler.handle(transformed, operationResult)) { + // Return negative to signal early termination + return -(count + 1); + } + count++; + } + + rowTransformer.finishTransformation(); + return count; + } }