This can vary between network types, but here is an example. I used autocomplete to figure this out, which is a good way to discover the right methods if you get stuck and need to make some tweaks.
// The output layer's configuration as a string
String conf = net.getOutputLayer().conf().toString();
// The layer's own toString(), which includes the configuration plus runtime state
String layer = net.getOutputLayer().toString();
System.out.println(layer);
System.out.println(conf);
Output:
org.deeplearning4j.nn.layers.OutputLayer{conf=NeuralNetConfiguration(layer=OutputLayer(super=BaseOutputLayer(super=FeedForwardLayer(super=BaseLayer(activationFn=identity, weightInitFn=org.deeplearning4j.nn.weights.WeightInitXavier@1, biasInit=0.0, gainInit=1.0, regularization=, regularizationBias=, iUpdater=Nesterovs(learningRate=0.01, learningRateSchedule=null, momentum=0.5, momentumISchedule=null, momentumSchedule=null), biasUpdater=null, weightNoise=null, gradientNormalization=None, gradientNormalizationThreshold=1.0), nIn=50, nOut=1), lossFn=LossL2(), hasBias=true)), miniBatch=true, maxNumLineSearchIterations=5, seed=12345, optimizationAlgo=STOCHASTIC_GRADIENT_DESCENT, variables=[W, b], stepFunction=null, minimize=true, cacheMode=NONE, dataType=FLOAT, iterationCount=0, epochCount=0), score=0.0, optimizer=null, listeners=[ScoreIterationListener(5)]}
NeuralNetConfiguration(layer=OutputLayer(super=BaseOutputLayer(super=FeedForwardLayer(super=BaseLayer(activationFn=identity, weightInitFn=org.deeplearning4j.nn.weights.WeightInitXavier@1, biasInit=0.0, gainInit=1.0, regularization=, regularizationBias=, iUpdater=Nesterovs(learningRate=0.01, learningRateSchedule=null, momentum=0.5, momentumISchedule=null, momentumSchedule=null), biasUpdater=null, weightNoise=null, gradientNormalization=None, gradientNormalizationThreshold=1.0), nIn=50, nOut=1), lossFn=LossL2(), hasBias=true)), miniBatch=true, maxNumLineSearchIterations=5, seed=12345, optimizationAlgo=STOCHASTIC_GRADIENT_DESCENT, variables=[W, b], stepFunction=null, minimize=true, cacheMode=NONE, dataType=FLOAT, iterationCount=0, epochCount=0)
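If you want the configuration for every layer rather than just the output layer, you can dump the whole MultiLayerConfiguration as JSON instead of relying on toString(). This is just a sketch assuming net is a MultiLayerNetwork like the one above; the json variable name is only for illustration.
// Print the full network configuration (all layers) as JSON
String json = net.getLayerWiseConfigurations().toJson();
System.out.println(json);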