diff --git a/app/cocal_methods.py b/app/cocal_methods.py
index 1453fe9d26dd7ac5c92d1d63c2ac7db12f7d12b6..d49c55c0a3c6d327c52e481a3810e4d820b5002b 100644
--- a/app/cocal_methods.py
+++ b/app/cocal_methods.py
@@ -437,21 +437,21 @@ class CocalMethods:
         h_comp_unc = real_imag_2_complex(np.sqrt(np.diag(h_comp_cov)))
 
         # regularize
-        #regularized_filter = sig.cheby2()
-        #h_regularized = self.freqz_core()
-        #h_compensated_and_regularized = DFT_multiply(
+        # regularized_filter = sig.cheby2()
+        # h_regularized = self.freqz_core()
+        # h_compensated_and_regularized = DFT_multiply(
         #    h_compensated, h_regularized, h_regularized_co
-        #)
+        # )
 
         # visualize result
         fig, ax = plt.subplots(1, 1, sharex=True, squeeze=False)
-        ax[0,0].plot(
+        ax[0, 0].plot(
             f[mask],
             np.abs(h[mask]),
             label="fitted TF",
             color="blue",
         )
-        ax[0,0].fill_between(
+        ax[0, 0].fill_between(
             f[mask],
             np.abs(h[mask]) - np.abs(h_unc[mask]),
             np.abs(h[mask]) + np.abs(h_unc[mask]),
@@ -459,21 +459,21 @@ class CocalMethods:
             label="unc of fitted TF",
             color="blue",
         )
-        ax[0,0].scatter(
+        ax[0, 0].scatter(
             self.ref_frequency[mask],
             np.abs(h_empirical[mask]),
             label="empirical TF",
             s=2,
             color="black",
         )
-        ax[0,0].scatter(
+        ax[0, 0].scatter(
             self.ref_frequency[mask],
             np.abs(h_comp[mask]),
             label="compensated TF",
             s=1,
             color="red",
         )
-        ax[0,0].fill_between(
+        ax[0, 0].fill_between(
             f[mask],
             np.abs(h_comp[mask]) - np.abs(h_comp_unc[mask]),
             np.abs(h_comp[mask]) + np.abs(h_comp_unc[mask]),
@@ -482,12 +482,12 @@ class CocalMethods:
             color="red",
         )
 
-        ax[0,0].plot(f, np.ones_like(f), "--r", label="ideal")
-        ax[0,0].legend()
-        ax[0,0].set_xscale("log")
-        ax[0,0].set_yscale("log")
-        # plt.savefig(self.result_image_path)
-        plt.show()
+        ax[0, 0].plot(f, np.ones_like(f), "--r", label="ideal")
+        ax[0, 0].legend()
+        ax[0, 0].set_xscale("log")
+        ax[0, 0].set_yscale("log")
+        plt.savefig(self.result_image_path)
+        # plt.show()
 
     def perform_dummy_computations(self):
         self.start_date = datetime.date.today().isoformat()
@@ -702,10 +702,14 @@ class CocalMethods:
         # uncertainty propagation using GUM_DFT not possible, because of ArrayMemoryError:
         # Unable to allocate 18.3 TiB for an array with shape (1584667, 1584667) and data type float64
         # hence reverting to diag-only MC approach
-        draw_samples = lambda size: np.random.normal(
-            self.ref_signal, self.ref_signal_unc, size=(size, self.ref_signal.size)
-        )
-        evaluate = lambda x: complex_2_real_imag(np.fft.fft(x))
+        def draw_samples(size):
+            return np.random.normal(
+                self.ref_signal, self.ref_signal_unc, size=(size, self.ref_signal.size)
+            )
+
+        def evaluate(x):
+            return complex_2_real_imag(np.fft.fft(x))
+
         umc_kwargs = {
             "draw_samples": draw_samples,
             "evaluate": evaluate,
@@ -805,7 +809,7 @@ class CocalMethods:
         mask_ri = np.r_[mask, mask]
 
         # custom weights to reduce influence of higher frequencies
-        weights = np.abs(np.diff(np.log10(np.abs(self.ref_frequency)+1), prepend=1))
+        weights = np.abs(np.diff(np.log10(np.abs(self.ref_frequency) + 1), prepend=1))
 
         # function to draw samples for Monte Carlo
         def draw_samples(size):
@@ -906,13 +910,15 @@ class CocalMethods:
 
         # H_ba = sig.freqz(b, a, w_empirical)[1] # quite slow
         H_ba = self.freqz_core(theta, Nb, w_empirical_exp)  # faster
-        
+
         # compute difference
         # diff = np.linalg.norm(H_ba - H_empirical)  # does not use log-scaling
         # diff = np.linalg.norm(np.log(H_ba) - np.log(H_empirical))
-        diff = np.log(np.abs(H_ba)) - np.log(np.abs(H_empirical)) # only amplitude matters, phase discarded
+        diff = np.log(np.abs(H_ba)) - np.log(
+            np.abs(H_empirical)
+        )  # only amplitude matters, phase discarded
 
         if weights is None:
             return np.linalg.norm(diff)
         else:
-            return np.linalg.norm(diff*weights)
+            return np.linalg.norm(diff * weights)
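
Note on the `draw_samples`/`evaluate` hunk above: as the in-code comment explains, a full covariance via GUM_DFT would need ~18.3 TiB, so the change keeps a diag-only Monte Carlo approach and merely replaces the lambdas with named functions. The following is a minimal, self-contained sketch of that idea, not the project's code: `ref_signal`, `ref_signal_unc` and `complex_2_real_imag` mirror names from the diff, and the block-wise loop stands in for whatever generic MC runner consumes the two callables.

import numpy as np


def complex_2_real_imag(z):
    # stack real and imaginary parts into one real-valued vector
    return np.r_[np.real(z), np.imag(z)]


def diag_only_mc(ref_signal, ref_signal_unc, runs=2000, blocksize=200, seed=None):
    """Propagate per-sample standard uncertainties through the FFT by Monte Carlo,
    keeping only the diagonal of the output covariance (i.e. variances)."""
    rng = np.random.default_rng(seed)
    n_out = 2 * ref_signal.size
    total = np.zeros(n_out)
    total_sq = np.zeros(n_out)
    n_done = 0
    while n_done < runs:
        size = min(blocksize, runs - n_done)
        # draw_samples: independent normal draws, one MC run per row
        samples = rng.normal(ref_signal, ref_signal_unc, size=(size, ref_signal.size))
        # evaluate: FFT of every draw, flattened to real/imag representation
        spectra = np.fft.fft(samples, axis=1)
        values = np.concatenate([spectra.real, spectra.imag], axis=1)
        total += values.sum(axis=0)
        total_sq += values.sum(axis=0) * 0 + (values**2).sum(axis=0)
        n_done += size
    mean = total / n_done
    # diag-only result: variances only, never the full (huge) covariance matrix
    var = total_sq / n_done - mean**2
    return mean, np.sqrt(np.maximum(var, 0.0))
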
diff --git a/app/main.py b/app/main.py
index 3e39717ccbec4dbc7a117caec3d8b2651f856986..cfaad7b02822dcbbe2984184117aee8efec35920 100644
--- a/app/main.py
+++ b/app/main.py
@@ -1,11 +1,12 @@
 import datetime
-from pathlib import Path
+import logging
 import uuid
+from pathlib import Path
 
 from fastapi import BackgroundTasks, Depends, FastAPI, UploadFile
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import FileResponse
 from fastapi.openapi.docs import get_swagger_ui_html
+from fastapi.responses import FileResponse
 from fastapi.staticfiles import StaticFiles
 from sqlalchemy.orm import Session
 
@@ -14,7 +15,6 @@ from .database import SessionLocal, engine
 
 models.Base.metadata.create_all(bind=engine)
 
-import logging
 logger = logging.getLogger("uvicorn")
 
 app = FastAPI(docs_url=None)
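
For context on the reformatted cost function at the end of the cocal_methods.py hunks: only the amplitude response enters the fit (phase is discarded), and each frequency bin is weighted by its spacing on a log10 axis so that densely packed high frequencies do not dominate. A hedged, stand-alone restatement of that cost follows; the function name `log_amplitude_cost` and the plain-array signature are illustrative, not from the repository.

import numpy as np


def log_amplitude_cost(H_model, H_empirical, ref_frequency, use_weights=True):
    # per-bin weight ~ width of the bin on a log10-frequency axis
    weights = np.abs(np.diff(np.log10(np.abs(ref_frequency) + 1), prepend=1))
    # only amplitude matters, phase is discarded
    diff = np.log(np.abs(H_model)) - np.log(np.abs(H_empirical))
    if not use_weights:
        return np.linalg.norm(diff)
    return np.linalg.norm(diff * weights)
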