net.fit(trainIter, nEpochs) throws a "No next element" error

I am learning from https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/convolution/AnimalsClassification.java, and when I run fit(trainIter, nEpochs) I get a "No next element" error. Please tell me why.

import javafx.util.Pair;
import org.datavec.api.io.filters.BalancedPathFilter;
import org.datavec.api.io.filters.RandomPathFilter;
import org.datavec.api.io.labels.ParentPathLabelGenerator;
import org.datavec.api.split.FileSplit;
import org.datavec.api.split.InputSplit;
import org.datavec.image.loader.BaseImageLoader;
import org.datavec.image.loader.NativeImageLoader;
import org.datavec.image.recordreader.ImageRecordReader;
import org.datavec.image.transform.*;
import org.deeplearning4j.api.storage.StatsStorage;
import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator;
import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.distribution.Distribution;
import org.deeplearning4j.nn.conf.distribution.NormalDistribution;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.*;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInitDistribution;
import org.deeplearning4j.optimize.api.InvocationType;
import org.deeplearning4j.optimize.listeners.EvaluativeListener;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.deeplearning4j.ui.VertxUIServer;
import org.deeplearning4j.ui.api.UIServer;
import org.deeplearning4j.ui.stats.StatsListener;
import org.deeplearning4j.ui.storage.FileStatsStorage;
import org.deeplearning4j.ui.storage.InMemoryStatsStorage;
import org.deeplearning4j.util.ModelSerializer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.dataset.api.preprocessor.DataNormalization;
import org.nd4j.linalg.dataset.api.preprocessor.ImagePreProcessingScaler;
import org.nd4j.linalg.learning.config.AdaDelta;
import org.nd4j.linalg.learning.config.Nesterovs;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.nd4j.linalg.schedule.ScheduleType;
import org.nd4j.linalg.schedule.StepSchedule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

/**
 * @version 1.0.0
 * @ClassName ModelCreate.java
 * @Description TODO
 * @createTime 2020年03月15日 14:11:00
 */
/**
 * Builds and trains an AlexNet-style convolutional network for image
 * classification using DL4J / DataVec.
 *
 * <p>Adapted from the deeplearning4j-examples AnimalsClassification sample.
 * The data directory must contain one sub-directory per class; the
 * sub-directory name is used as the label (via ParentPathLabelGenerator).</p>
 *
 * @version 1.0.0
 * @ClassName ModelCreate.java
 * @createTime 2020年03月15日 14:11:00
 */
public class ModelCreate {
    private static final Logger log= LoggerFactory.getLogger(ModelCreate.class);

    // Accepted image file extensions, as defined by DataVec (png, jpg, ...).
    private static final String [] allowedExtensions = BaseImageLoader.ALLOWED_FORMATS;

    private static final int nChannels=3;//input channels: 3 for colour images
    private static final int width=227;//input width (canonical AlexNet size)
    private static final int height=227;//input height
    private static final int batchSize=64;//minibatch size
    private static final int nEpochs=300;//number of training epochs
    private static int numLabels;//number of classes, discovered from the directory layout in main()

    private static final int seed=123456;

    // Fixed-seed RNG so the train/test split is reproducible across runs.
    private static final Random rng=new Random(seed);

    public static void main(String[] args) throws Exception {

        /**
         * Load the image data.
         */

        log.info("开始加载图像文件");
        // Root of the data set on the local machine.
        // NOTE(review): a "No next element" error from fit() typically means this
        // directory is empty or contains no files with an allowed image
        // extension — verify the path and its contents first.
        File mainPath = new File("D:\\data");

        // Collect files under the parent directory matching allowedExtensions;
        // the RNG keeps the subsequent train/test split reproducible.
        FileSplit filesInDir =  new FileSplit(mainPath,allowedExtensions,rng);

        System.out.println("数量"+ filesInDir.length());
        // Use each image's parent directory name as its label/class name.
        ParentPathLabelGenerator labelMaker =  new ParentPathLabelGenerator();

        // Random sampling of paths.
        RandomPathFilter randomPathFilter=new RandomPathFilter(rng);
        // Split the image files into training and test sets: 95% train, 5% test.
        InputSplit [] filesInDirSplit = filesInDir.sample(randomPathFilter,95,5);
        InputSplit trainData = filesInDirSplit [0];//training set
        InputSplit testData = filesInDirSplit [1]; //test set

        log.info("trainData URI String Length={},testData URI String Length={}", trainData.length(), testData.length());

        log.info("开始数据增强");

        // Scale pixel values from [0,255] into [0,1].
        DataNormalization scaler=new ImagePreProcessingScaler(0,1);

        // Record reader resizes every image to height x width x nChannels.
        ImageRecordReader recordReader=new ImageRecordReader(height,width,nChannels,labelMaker);

        // Initialize the record reader with the raw training split.
        recordReader.initialize(trainData);

        numLabels=recordReader.numLabels();
        System.out.println(numLabels);
        // Training iterator (the label is at index 1 of each record).
        DataSetIterator trainIter=new RecordReaderDataSetIterator(recordReader,batchSize,1,numLabels);

        scaler.fit(trainIter);//normalization
        trainIter.setPreProcessor(scaler);

        System.out.println("是都支持重置"+trainIter.resetSupported());

        // Test iterator: backed by its OWN record reader over the test split.
        ImageRecordReader testrr=new ImageRecordReader(height,width,nChannels,labelMaker);
        testrr.initialize(testData);

        // BUG FIX: the test iterator must wrap testrr, not recordReader.
        // The original wrapped recordReader, so train and test iterators shared
        // one underlying reader; the EvaluativeListener then exhausted/reset the
        // training data mid-training, and fit() failed with "No next element".
        DataSetIterator testIter=new RecordReaderDataSetIterator(testrr,batchSize,1,numLabels);
        scaler.fit(testIter);
        testIter.setPreProcessor(scaler);


        log.info("Build model");
        MultiLayerNetwork network=alexnenModel();
        network.init();

        // Start the training UI backend.
        VertxUIServer uiServer = VertxUIServer.getInstance();
        uiServer.start();
        // Where network stats (gradients, scores over time, ...) are stored.
        //StatsStorage statsStorage=new InMemoryStatsStorage();
        StatsStorage statsStorage = new FileStatsStorage(new File("ui/ui319iterReset.dl4j")); // file-backed so stats can be reloaded later; attach to the UI to visualize
        uiServer.attach(statsStorage);

        log.info("Train model ......");

        // Listeners: UI stats, per-iteration score, and evaluation on the test
        // set at the end of every epoch.
        network.setListeners(new StatsListener( statsStorage), new ScoreIterationListener(1), new EvaluativeListener(testIter, 1, InvocationType.EPOCH_END));

        network.fit(trainIter,nEpochs);
        trainIter.reset();
        // Example of using the trained model for prediction: print the result
        // for the first example of the first minibatch.
        DataSet testDataSet = trainIter.next();
        List<String> allClassLabels =recordReader.getLabels();
        int labelIndex = testDataSet.getLabels().argMax(1).getInt(0);
        int[] predictedClasses = network.predict(testDataSet.getFeatures());
        String expectedResult = allClassLabels.get(labelIndex);
        String modelPrediction = allClassLabels.get(predictedClasses[0]);
        System.out.print("\nFor a single example that is labeled " + expectedResult + " the model predicted " + modelPrediction + "\n\n");

        // Persist the trained model together with the normalizer.
        ModelSerializer.writeModel(network,new File("model/AlexNet.zip"),true,scaler);

        log.info("****************Example finished********************");
    }



    /**
     * Network construction helpers.
     */

    /** Initial convolution layer with explicit nIn (the first layer of the net). */
    private static ConvolutionLayer convInit(String name, int in, int out, int[] kernel, int[] stride, int[] pad, double bias) {
        return new ConvolutionLayer.Builder(kernel, stride, pad).name(name).nIn(in).nOut(out).biasInit(bias).build();
    }

    /** 3x3 convolution, stride 1, padding 1; nIn inferred from the previous layer. */
    private static ConvolutionLayer conv3x3(String name, int out, double bias) {
        return new ConvolutionLayer.Builder(new int[]{3,3}, new int[] {1,1}, new int[] {1,1}).name(name).nOut(out).biasInit(bias).build();
    }

    /** 5x5 convolution with caller-supplied stride and padding. */
    private static ConvolutionLayer conv5x5(String name, int out, int[] stride, int[] pad, double bias) {
        return new ConvolutionLayer.Builder(new int[]{5,5}, stride, pad).name(name).nOut(out).biasInit(bias).build();
    }

    /** Max-pooling (subsampling) layer with stride 2x2. */
    private static SubsamplingLayer maxPool(String name, int[] kernel) {
        return new SubsamplingLayer.Builder(kernel, new int[]{2,2}).name(name).build();
    }

    /** Fully-connected layer with dropout and a custom weight distribution. */
    private static DenseLayer fullyConnected(String name, int out, double bias, double dropOut, Distribution dist) {
        return new DenseLayer.Builder()
                .name(name)
                .nOut(out)
                .biasInit(bias)
                .dropOut(dropOut)
                .weightInit(new WeightInitDistribution(dist))
                .build();
    }

    /**
     * Builds the AlexNet-style network.
     * Relies on {@link #numLabels} having been set from the data before this
     * method is called (see main()).
     */
    private static MultiLayerNetwork alexnenModel() {

        double nonZeroBias = 1;
        double dropOut = 0.8;// NOTE(review): in DL4J dropOut(0.8) is the retain probability, i.e. 80% of activations are kept
        log.info("Build model......");
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .weightInit(new NormalDistribution(0.0, 0.01))
                .activation(Activation.LEAKYRELU)
                .updater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 0.1, 0.01, 10000), 0.9))// Nesterov momentum; LR steps from 0.1 down by x0.01 every 10000 iterations
                .biasUpdater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 0.2, 0.1, 10000),0.9))

                .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) // normalize gradients to guard against vanishing/exploding gradients
                .l2(1e-4)
                .list()
                .layer(convInit("cnn1", nChannels, 96, new int[]{11, 11}, new int[]{4, 4}, new int[]{3, 3}, 0))
                .layer(new LocalResponseNormalization.Builder().name("lrn1").build())
                .layer(maxPool("maxpool1", new int[]{3,3}))
                .layer(conv5x5("cnn2", 256, new int[] {1,1}, new int[] {2,2}, nonZeroBias))
                .layer(new LocalResponseNormalization.Builder().name("lrn2").build())
                .layer(maxPool("maxpool2", new int[]{3,3}))
                .layer(conv3x3("cnn3", 384, 0))
                .layer(conv3x3("cnn4", 384, nonZeroBias))
                .layer(conv3x3("cnn5", 256, nonZeroBias))
                .layer(maxPool("maxpool3", new int[]{3,3}))
                .layer(fullyConnected("ffn1", 4096, nonZeroBias, dropOut, new NormalDistribution(0, 0.005)))
                .layer(fullyConnected("ffn2", 4096, nonZeroBias, dropOut, new NormalDistribution(0, 0.005)))
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .name("output")
                        .nOut(numLabels)
                        .activation(Activation.SOFTMAX)
                        .build())
                .setInputType(InputType.convolutional(height, width, nChannels))
                .build();

        return new MultiLayerNetwork(conf);

    }

}

Can you share your actual stack trace with us, please?

Also, as I originally said in https://github.com/eclipse/deeplearning4j/issues/8795#issuecomment-601090470, it is most likely because it can't find any data in the folder you have specified. (Note also that on line 126 the test iterator is built over `recordReader` — the training reader — instead of `testrr`, which will make training and evaluation consume the same underlying data.)