diff --git a/ADwinProII/ADwin_utils.py b/ADwinProII/ADwin_utils.py
index fa1d2f41eaf3f9d62313bb05577763dec6f206d9..33f12841b4bf3c3d4760fd8ebf22f19cbda4d931 100644
--- a/ADwinProII/ADwin_utils.py
+++ b/ADwinProII/ADwin_utils.py
@@ -156,7 +156,11 @@ def get_ain_traces(h5file, raw_data_name="ADwinAnalogIn_DATA", convert_data=True
                 times = np.arange(acquisition_times["start_time"][i],acquisition_times["stop_time"][i],int(clock_rate//acquisition_times["storage_rate"][i])) / clock_rate
             else:
                 times = np.arange(acquisition_times["start_time"][i],acquisition_times["stop_time"][i]) / clock_rate
-            # print(times.size,acquisition.size)
+            if "waits" in f["data"]:
+                # There was a wait in the experiment, let's offset the times such that they are accurate
+                waits = f["data/waits"][:]
+                for j in range(len(waits)):
+                    times[times > waits["time"][j]] += waits["duration"][j]
             if write_hdf5:
                 data = np.rec.fromarrays([times, acquisition], dtype=dtype)
                 group.create_dataset(label, compression = config.compression, data = data)
@@ -202,5 +206,10 @@ def get_aout_trace(h5file, output):
         data = f["devices/ADwin/ANALOG_OUT/VALUES"][:]
         mask = (data["channel"] == port)
         t = np.round(PROCESSDELAY / CLOCK_T12 * data["n_cycles"][mask], 9)
+        if "data" in f and "waits" in f["data"]:
+                # There was a wait in the experiment, let's offset the times such that they are accurate
+                waits = f["data/waits"][:]
+                for i in range(len(waits)):
+                    t[t > waits["time"][i]] += waits["duration"][i]
         values = DAC(data["value"][mask])
     return t, values
\ No newline at end of file
diff --git a/ADwinProII/blacs_workers.py b/ADwinProII/blacs_workers.py
index c2db3c2ee7ed954cb08d42f6a2964c08110c8c38..ef8ce38bd18e221ed625e1eb4a67e5c3cf14d68b 100644
--- a/ADwinProII/blacs_workers.py
+++ b/ADwinProII/blacs_workers.py
@@ -142,6 +142,9 @@ class ADwinProIIWorker(Worker):
             self.stop_time = group.attrs["stop_time"]
             # Send stop time to ADwin
             self.adw.Set_Par(2, int(self.stop_time * CLOCK_T12 / self.PROCESSDELAY))
+            # Send wait time and timeout to ADwin, in units of process cycles (wait_time defaults to -1 if there are no waits)
+            self.adw.Set_Par(3, int(group.attrs.get("wait_time",-1)))
+            self.adw.Set_Par(5, int(group.attrs.get("wait_timeout",0)))
             # Send data to ADwin
             AOUT = group["ANALOG_OUT/VALUES"]
             if fresh or not np.array_equal(AOUT[:],self.smart_cache["AOUT"]):
@@ -157,6 +160,7 @@ class ADwinProIIWorker(Worker):
                     self.smart_cache[name] = DOUT[:]
                     self.adw.SetData_Long(DOUT["n_cycles"], module,   1, DOUT.shape[0])
                     self.adw.SetData_Long(DOUT["bitfield"], module+1, 1, DOUT.shape[0])
+                    self.adw.Set_Par(module-1, int(DOUT.attrs.get("wait_time",-1)))
             PIDs = group["ANALOG_OUT/PID_CHANNELS"]
             if fresh or not np.array_equal(PIDs[:],self.smart_cache["PIDs"]):
                 print("PIDs programmed.")
@@ -207,6 +211,19 @@ class ADwinProIIWorker(Worker):
             # array["values"] = workload_data
             # group.create_dataset("ADwin_Workload", compression = config.compression, data = array)
             # f['devices/ADwin/ANALOG_IN'].attrs["ADwin_Workload"] = "TEST"
+
+            # Get wait duration
+            if f[f"devices/{self.device_name}"].attrs.get("wait_time", None) is not None:
+                wait_duration = self.adw.Get_Par(4) / CLOCK_T12 * self.PROCESSDELAY
+                wait_table = f["waits"]
+                dtypes = [('label', 'a256'),('time', float),('timeout', float),('duration', float),('timed_out', bool)]
+                data = np.empty(len(wait_table), dtype=dtypes)
+                data['label'] = wait_table['label']
+                data['time'] = wait_table['time']
+                data['timeout'] = wait_table['timeout']
+                data['duration'] = wait_duration
+                data['timed_out'] = wait_duration > wait_table['timeout']
+                f.create_dataset('/data/waits', data=data)
         # Delete h5file from worker, shot is finished
         self.h5file = None
         # Check if the TiCo processes were running correctly
diff --git a/ADwinProII/labscript_devices.py b/ADwinProII/labscript_devices.py
index f74a85b4056302be67bca20f1bfae9040378ece7..8fddb6b04337e52a164d89ef98452ca0daf93fc8 100644
--- a/ADwinProII/labscript_devices.py
+++ b/ADwinProII/labscript_devices.py
@@ -14,7 +14,7 @@
 ############################################################################
 
 
-from labscript import Pseudoclock, PseudoclockDevice, ClockLine, Device, LabscriptError, config, set_passed_properties
+from labscript import Pseudoclock, PseudoclockDevice, ClockLine, Device, LabscriptError, config, set_passed_properties, compiler
 import numpy as np
 
 from . import PROCESSDELAY_T12, PROCESSDELAY_TiCo, CLOCK_T12, CLOCK_TiCo, MAX_EVENTS, MAX_PID_EVENTS, A_IN_BUFFER
@@ -198,8 +198,10 @@ class ADwinProII(PseudoclockDevice):
     
 
     def do_checks(self, outputs):
-        if self.trigger_times != [0]:
-            raise NotImplementedError('ADwin does not support retriggering or waiting.')
+        if len(self.trigger_times)>1:
+            # Allowed: a single software trigger (starting the shot as master pseudoclock),
+            # optionally followed by a second trigger belonging to a 'wait'.
+            if len(self.trigger_times)>2 or self.trigger_times[1] not in compiler.wait_table:
+                raise NotImplementedError('ADwin does not support retriggering, and only supports one "wait" in the current implementation.')
         for output in outputs:
             output.do_checks(self.trigger_times)
 
@@ -242,6 +244,8 @@ class ADwinProII(PseudoclockDevice):
                 PID_config.append(device.PID_config)
             elif isinstance(device,ADwinDIO32):
                 group.create_dataset("DIGITAL_OUT/"+device.name, data=device.digital_data)
+                if compiler.wait_table:
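+                    # Wait time for this DIO module in units of its clock-line ticks, read back by the BLACS worker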
+                    group["DIGITAL_OUT/"+device.name].attrs["wait_time"] = round( list(compiler.wait_table)[0] * device.parent_clock_line.clock_limit )
             elif isinstance(device,ADwinAI8):
                 # For the AIN table it's required that self.modules is sorted correctly!
                 analog_input.append(device.AIN_times)
@@ -312,4 +316,13 @@ class ADwinProII(PseudoclockDevice):
         # Save list of module names to connection table properties
         module_dict = {str(module.module_address) : module.name for module in self.modules}
         self.set_property("modules", module_dict, "connection_table_properties")
+
+        # Add wait time and timeout in units of ADwin process cycles
+        if len(compiler.wait_table)>1:
+            raise LabscriptError("ADwin supports only a sinlge wait for now!")
+        for time,args in compiler.wait_table.items():
+            hdf5_file[f"devices/{self.name}"].attrs["wait_time"] = round(time * self._pseudoclock_T12.clock_limit)
+            hdf5_file[f"devices/{self.name}"].attrs["wait_timeout"] = round(args[1] * self._pseudoclock_T12.clock_limit)
+            for TiCo in self.TiCos:
+                hdf5_file[f"devices/{self.name}/DIGITAL_OUT/"].attrs["wait_time"] = round(time * self.TiCos[TiCo].clock_limit)
         
\ No newline at end of file
diff --git a/DCAMCamera/blacs_tabs.py b/DCAMCamera/blacs_tabs.py
index b6acf27e7c61683a70dd924f33f27ad71663444d..ac3d46fd95f8ce2453727b0105a161608861ee59 100644
--- a/DCAMCamera/blacs_tabs.py
+++ b/DCAMCamera/blacs_tabs.py
@@ -3,11 +3,14 @@
 # /user_devices/DCAMCamera/blacs_tabs.py                            #
 #                                                                   #
 # Jan 2023, Marvin Holten                                           #
+# 2025, Johannes Schabbauer                                         #
 #                                                                   #
 #                                                                   #
 #####################################################################
 
 from labscript_devices.IMAQdxCamera.blacs_tabs import IMAQdxCameraTab
+import labscript_utils.h5_lock
+import h5py
+import labscript_utils.properties
 
 class DCAMCameraTab(IMAQdxCameraTab):
     """Thin sub-class of obj:`IMAQdxCameraTab`.
@@ -16,4 +19,30 @@ class DCAMCameraTab(IMAQdxCameraTab):
     :obj:`DCAMCameraWorker`."""
     
     # override worker class
-    worker_class = 'user_devices.DCAMCamera.blacs_workers.DCAMCameraWorker'
\ No newline at end of file
+    worker_class = 'user_devices.DCAMCamera.blacs_workers.DCAMCameraWorker'
+
+    def initialise_workers(self):
+        table = self.settings['connection_table']
+        connection_table_properties = table.find_by_name(self.device_name).properties
+        # The device properties can vary on a shot-by-shot basis, but at startup we will
+        # initially set the values that are configured in the connection table, so they
+        # can be used for manual mode acquisition:
+        with h5py.File(table.filepath, 'r') as f:
+            device_properties = labscript_utils.properties.get(
+                f, self.device_name, "device_properties"
+            )
+        worker_initialisation_kwargs = {
+            'serial_number': connection_table_properties['serial_number'],
+            'orientation': connection_table_properties['orientation'],
+            'camera_attributes': device_properties['camera_attributes'],
+            'manual_mode_camera_attributes': connection_table_properties[
+                'manual_mode_camera_attributes'
+            ],
+            'mock': connection_table_properties['mock'],
+            'image_receiver_port': self.image_receiver.port,
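+            # Addition compared to the upstream IMAQdxCameraTab: port for sending the tweezer occupation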
+            'occupation_receiver_port' : connection_table_properties.get('occupation_receiver_port',None)
+        }
+        self.create_worker(
+            'main_worker', self.worker_class, worker_initialisation_kwargs
+        )
+        self.primary_worker = "main_worker"
\ No newline at end of file
diff --git a/DCAMCamera/blacs_workers.py b/DCAMCamera/blacs_workers.py
index 03bacb39cf5631a3dcd876a617cbccdc599cf8ea..4cfd035ba9e51089e63fd9da8202dc48ff4915dd 100644
--- a/DCAMCamera/blacs_workers.py
+++ b/DCAMCamera/blacs_workers.py
@@ -3,11 +3,20 @@
 # /user_devices/DCAMCamera/blacs_workers.py                         #
 #                                                                   #
 # Jan 2023, Marvin Holten                                           #
+# 2025, Johannes Schabbauer                                         #
 #                                                                   #
 #                                                                   #
 #####################################################################
 
 from labscript_devices.IMAQdxCamera.blacs_workers import IMAQdxCameraWorker
+import threading
+import numpy as np
+import labscript_utils.h5_lock
+import h5py
+import labscript_utils.properties
+import zmq
+from labscript_utils.ls_zprocess import Context
+from labscript_utils.shared_drive import path_to_local
 
 # Don't import API yet so as not to throw an error, allow worker to run as a dummy
 # device, or for subclasses to import this module to inherit classes without requiring API
@@ -204,7 +213,7 @@ class DCAM_Camera(object):
                 
         return image
 
-    def grab_multiple(self, n_images, images):
+    def grab_multiple(self, n_images, images, tweezer_socket=None, tweezer_img_no=None, bright_img_no=None, get_occupation=None):
         """Grab n_images into images array during buffered acquistion.
         
         Grab method involves a continuous loop with fast timeout in order to
@@ -222,6 +231,19 @@ class DCAM_Camera(object):
                 self._abort_acquisition = False
                 return
             images.append(self.grab(bufferNo=i))
+            # Send image to occupation receiver
+            if tweezer_socket and tweezer_img_no==i:
+                if bright_img_no is not None:
+                    occupation = get_occupation(np.array(images[-1],dtype=np.int32)-np.array(images[bright_img_no],dtype=np.int32))
+                else:
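+                    # No bright reference image available, subtract an assumed constant background of 200 counts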
+                    occupation = get_occupation(np.array(images[-1],dtype=np.int32)-200)
+                metadata = dict(dtype=str(occupation.dtype), shape=occupation.shape)
+                tweezer_socket.send_json(metadata, zmq.SNDMORE)
+                tweezer_socket.send(occupation, copy=False)
+                print(f"Trying to send image {len(images)} to occupation receiver...", end="\r")
+                response = tweezer_socket.recv()
+                assert response == b'ok', response
+                print(f"Sent occupation for image {len(images)} to occupation receiver.")
             print(f"Got image {i+1} of {n_images}.")                    
 
         print(f"Got {len(images)} of {n_images} images.")
@@ -248,6 +270,16 @@ class DCAMCameraWorker(IMAQdxCameraWorker):
     :obj:`get_attributes_as_dict` to use DCAMCameraWorker.get_attributes() method."""
     interface_class = DCAM_Camera
 
+    def init(self):
+        super().init()
+
+        # Connect to occupation matrix receiver port for conditional Tweezer programming
+        if self.occupation_receiver_port is not None:
+            self.tweezer_socket = Context().socket(zmq.REQ)
+            self.tweezer_socket.connect(
+                f'tcp://{self.parent_host}:{self.occupation_receiver_port}'
+            )
+
     def get_attributes_as_dict(self, visibility_level):
         """Return a dict of the attributes of the camera for the given visibility
         level
@@ -260,5 +292,91 @@ class DCAMCameraWorker(IMAQdxCameraWorker):
             return IMAQdxCameraWorker.get_attributes_as_dict(self,visibility_level)
         else:
             return self.camera.get_attributes(visibility_level)
+        
+    def transition_to_buffered(self, device_name, h5_filepath, initial_values, fresh):
+        if getattr(self, 'is_remote', False):
+            h5_filepath = path_to_local(h5_filepath)
+        if self.continuous_thread is not None:
+            # Pause continuous acquistion during transition_to_buffered:
+            self.stop_continuous(pause=True)
+        with h5py.File(h5_filepath, 'r') as f:
+            group = f['devices'][self.device_name]
+            if not 'EXPOSURES' in group:
+                return {}
+            self.h5_filepath = h5_filepath
+            self.exposures = group['EXPOSURES'][:]
+            self.n_images = len(self.exposures)
+
+            # Get the camera_attributes from the device_properties
+            properties = labscript_utils.properties.get(
+                f, self.device_name, 'device_properties'
+            )
+            camera_attributes = properties['camera_attributes']
+            self.stop_acquisition_timeout = properties['stop_acquisition_timeout']
+            self.exception_on_failed_shot = properties['exception_on_failed_shot']
+            saved_attr_level = properties['saved_attribute_visibility_level']
+            self.camera.exception_on_failed_shot = self.exception_on_failed_shot
+
+
+            ### ADDED CODE TO PASS OCCUPATION RECEIVER ARGUMENTS TO grab_multiple() ###
+            self.images = []
+            if properties.get("occupation_receiver_image_index", False):
+                tweezers_centers = f["globals"].attrs["tweezers_centers"]
+                thresholds = f["globals"].attrs["tweezers_thresholds"]
+                ROI_size = f["globals"].attrs["tweezers_ROI"]
+                get_occupation = lambda image: self.get_occupation(image,tweezers_centers,thresholds, ROI_size)
+                args = (self.n_images, self.images,self.tweezer_socket, properties["occupation_receiver_image_index"], properties.get("occupation_receiver_bright_index",None), get_occupation)
+            else:
+                # Standard args from IMAQdxCameraWorker
+                args = (self.n_images, self.images)
+
+            
+        # Only reprogram attributes that differ from those last programmed in, or all of
+        # them if a fresh reprogramming was requested:
+        if fresh:
+            self.smart_cache = {}
+        self.set_attributes_smart(camera_attributes)
+        # Get the camera attributes, so that we can save them to the H5 file:
+        if saved_attr_level is not None:
+            self.attributes_to_save = self.get_attributes_as_dict(saved_attr_level)
+        else:
+            self.attributes_to_save = None
+        print(f"Configuring camera for {self.n_images} images.")
+        self.camera.configure_acquisition(continuous=False, bufferCount=self.n_images)
+        
 
+        self.acquisition_thread = threading.Thread(
+            target=self.camera.grab_multiple,
+            args=args,
+            daemon=True,
+        )
+        self.acquisition_thread.start()
+        return {}
+
+    def get_occupation(self, image, tweezer_centers, thresholds, size_px=3):
+        """
+        Calculate the occupation of each tweezer, either 0 (empty) or 1 (occupied).
+
+        Parameters
+        ----------
+        image : ndarray
+            Monochrome picture.
+        tweezer_centers : ndarray
+            Pixel indices of the tweezer centers in the image.
+        thresholds : float or ndarray
+            Threshold in pixel counts that distinguishes no atom from 1 atom.
+            If a single number, the same threshold is used for all tweezers.
+            If ndarray, its size has to equal the number of tweezer centers.
+        size_px : int
+            Size of the ROI around each tweezer center over which the pixels are summed.
+
+        Returns
+        -------
+        ndarray of bool
+            True for every tweezer whose summed counts exceed its threshold.
+        """
+        px_sum = np.zeros(tweezer_centers.shape[0])
+        lower = size_px//2
+        upper = size_px-lower
+        for i in range(tweezer_centers.shape[0]):
+            x,y = tweezer_centers[i,:]
+            px_sum[i] = image[x-lower:x+upper, y-lower:y+upper].sum()
+
+        return px_sum > thresholds
+        
 
diff --git a/DCAMCamera/labscript_devices.py b/DCAMCamera/labscript_devices.py
index 79a9f2689a2964f2cbb447dd2c032d50eb1f07de..584545946e8cefa5b50669523139d93c100c8692 100644
--- a/DCAMCamera/labscript_devices.py
+++ b/DCAMCamera/labscript_devices.py
@@ -3,10 +3,14 @@
 # /user_devices/DCAMCamera/labscript_devices.py                     #
 #                                                                   #
 # Jan 2023, Marvin Holten                                           #
+# 2025, Johannes Schabbauer                                         #
+
 #                                                                   #
 #                                                                   #
 #####################################################################
 
+import h5py
+from labscript import set_passed_properties, LabscriptError
 from labscript_devices.IMAQdxCamera.labscript_devices import IMAQdxCamera
 
 class DCAMCamera(IMAQdxCamera):
@@ -14,3 +18,38 @@ class DCAMCamera(IMAQdxCamera):
     
     description = 'DCAM Camera'
 
+    @set_passed_properties(
+        property_names={
+            "connection_table_properties": [
+                "occupation_receiver_port",
+            ],
+        }
+    )
+    def __init__(self, name, parent_device, connection, serial_number, orientation=None, pixel_size=..., magnification=1, trigger_edge_type='rising', trigger_duration=None, minimum_recovery_time=0, camera_attributes=None, manual_mode_camera_attributes=None, stop_acquisition_timeout=5, exception_on_failed_shot=True, saved_attribute_visibility_level='intermediate', occupation_receiver_port=None, mock=False, **kwargs):
+        self.occupation_receiver_image_index = None
+        self.exposure_times = []
+        super().__init__(name, parent_device, connection, serial_number, orientation, pixel_size, magnification, trigger_edge_type, trigger_duration, minimum_recovery_time, camera_attributes, manual_mode_camera_attributes, stop_acquisition_timeout, exception_on_failed_shot, saved_attribute_visibility_level, mock, **kwargs)
+
+    def expose(self, t, name, frametype='frame', trigger_duration=None, occupation_receiver=False):
+        self.exposure_times.append(t)
+        if occupation_receiver:
+            if getattr(self,"occupation_receiver_time_"+occupation_receiver,None) is not None:
+                raise LabscriptError(f"{self.name}: In the current implementation only a single image and bright pic can be sent to the 'occupation receiver'")
+            if occupation_receiver not in ["atoms","bright"]:
+                raise LabscriptError(f"'occupation_receiver' can only be 'atoms' or 'bright'.")
+            setattr(self,"occupation_receiver_time_"+occupation_receiver, t)
+        
+        return super().expose(t, name, frametype, trigger_duration)
+
+    def generate_code(self, hdf5_file):
+        super().generate_code(hdf5_file)
+        if getattr(self,"occupation_receiver_time_atoms",None) is not None:
+            self.occupation_receiver_image_index = self.exposure_times.index(self.occupation_receiver_time_atoms)
+            hdf5_file[f"devices/{self.name}"].attrs["occupation_receiver_image_index"] = self.occupation_receiver_image_index
+            if getattr(self,"occupation_receiver_time_bright",None) is not None:
+                self.occupation_receiver_bright_index = self.exposure_times.index(self.occupation_receiver_time_bright)
+                hdf5_file[f"devices/{self.name}"].attrs["occupation_receiver_bright_index"] = self.occupation_receiver_bright_index
+                # Error check: the bright pic must be taken before the atoms pic if we want to calculate the occupation matrix mid-shot
+                if self.occupation_receiver_image_index < self.occupation_receiver_bright_index:
+                    raise LabscriptError("When calculating the occupation mid-shot, the bright pic must be taken before the atom pic!")
+            
diff --git a/SpectrumAWG/blacs_workers.py b/SpectrumAWG/blacs_workers.py
index 9be404a2ee09acd38a136c59599d55d6c44ffad1..82621f327773b28242009e88cef4ab4a4ce72697 100644
--- a/SpectrumAWG/blacs_workers.py
+++ b/SpectrumAWG/blacs_workers.py
@@ -2,7 +2,10 @@ import labscript_utils.h5_lock
 import h5py
 from blacs.tab_base_classes import Worker
 from . import SpectrumCard
-import numpy as np
+try:
+    from pythonlib.conditional_tweezers.OccupationReceiver import OccupationReceiver
+except ImportError:
+    print("Custom Class to reprogram Tweezers not found...")
 
 class SpectrumAWGWorker(Worker):
     def init(self):
@@ -36,6 +39,11 @@ class SpectrumAWGWorker(Worker):
         # Initialize memory for smart programming
         # Keys: hash of instructions, Values: position in memory
         self.smart_cache = {}
+
+        self.occupations = []
+        if getattr(self,"occupation_receiver_port",0):
+            print(f"Creat occupation receiver socket at port {self.occupation_receiver_port}")
+            self.occupation_receiver = OccupationReceiver(self.occupation_receiver_port, self.occupations, self.AWG)
     
     def program_manual(self, values):
         if values is None:
@@ -63,6 +71,8 @@ class SpectrumAWGWorker(Worker):
         write_setup_and_start = False
         with h5py.File(h5_file,'r') as f:
             group = f[f"devices/{device_name}"]
+            function_conditional_programming = group.attrs.get("function_conditional_programming", "")
+            function_conditional_programming_args = group.attrs.get("function_conditional_programming_args", ())
             for ch in self.channels:
                 if fresh or len(self.smart_cache)+len(group[ch].attrs) > self.memory_segments:
                     # Reset smart programming and start writing memory from the beginning
@@ -100,12 +110,13 @@ class SpectrumAWGWorker(Worker):
                         if index_h5 in group[ch]["labels"].attrs:
                             initial_values[memory_index] = group[ch]["labels"].attrs[index_h5]
                         self.AWG.transfer_sequence_replay_samples(memory_index,data)
-                    if index!=last_index:
+                    if index!=last_index or function_conditional_programming:
                         self.AWG.seq_set_sequence_step(index,memory_index,index+1,1,'on_trigger',last_step=False)
                     else:
                         self.AWG.seq_set_sequence_step(index,memory_index,index,1,'on_trigger',last_step=False)
                         # If there is a trigger at the stop time, repeat one last sequence 
-                        # and then stop (is not card_stop() was called already)
+                        # and then stop (if not card_stop() was called already)
+                    if index==last_index:
                         self.AWG.seq_set_sequence_step(index+1,memory_index,0,1,'always',last_step=True)
                 # ONLY IMPLEMENTED FOR ONE CHANNEL, IF TWO CHANNELS ARE NEEDED WE ALREADY GET AN ERROR IN LABSCRIPT
                 # FOR IMPLEMENTATION ONE HASE TO INTERWEAVE THE DATA FOR BOTH CHANNELS
@@ -116,6 +127,19 @@ class SpectrumAWGWorker(Worker):
             self.AWG.card_write_setup() # TODO: Do we have to call that every shot or just once after the initialization?
             self.AWG.card_start()
             self.AWG.card_enable_trigger()
+
+        if function_conditional_programming:
+            print("Start conditional programming of AWG...")
+            if (len(instruction)-1)%3 != 0:
+                raise RuntimeError("Current implementation only supports multitone for conditional programming")
+            num_samples = int(instruction[0])
+            num_tones = (len(instruction)-1)//3
+            freq = instruction[1:num_tones+1]
+            ampl = instruction[num_tones+1:2*num_tones+1]
+            phase = instruction[2*num_tones+1:]
+            self.occupation_receiver.set_function(function_conditional_programming, function_conditional_programming_args)
+            self.occupation_receiver.last_tweezer_params(index+1, len(self.smart_cache), freq, ampl, phase, num_samples, self.sample_rate)
+
         return initial_values
 
     def transition_to_manual(self):
diff --git a/SpectrumAWG/labscript_devices.py b/SpectrumAWG/labscript_devices.py
index 4dd15247dab759ac20f5e76cf0bc0d98663f35d5..84ce9028bce44a7f14fd1f219989629fd6c32b28 100644
--- a/SpectrumAWG/labscript_devices.py
+++ b/SpectrumAWG/labscript_devices.py
@@ -121,11 +121,11 @@ class SpectrumAWG(Device):
     allowed_children = [AWGOutput]
 
     @set_passed_properties(
-        property_names={"connection_table_properties": ["device_path","timeout","external_clock_rate","sample_rate","memory_segments"],
+        property_names={"connection_table_properties": ["device_path","timeout","external_clock_rate","sample_rate","memory_segments","occupation_receiver_port"],
                         "device_properties": []                
         }
         )
-    def __init__(self, name, device_path, sample_rate, external_clock_rate=None, timeout=5000, channel_mode="seq", memory_segments=2**16, **kwargs):
+    def __init__(self, name, device_path, sample_rate, external_clock_rate=None, timeout=5000, channel_mode="seq", memory_segments=2**16, occupation_receiver_port=None, **kwargs):
         """ Create SpectrumAWG instance.
         
         Parameters
@@ -154,6 +154,10 @@ class SpectrumAWG(Device):
         internal_memory = 2**32 # 4GB
         self.max_sample_size = internal_memory//2//memory_segments
 
+    def set_conditional_reprogramming(self, function_name, function_args=None):
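+        """Store the name (and arguments) of the function used for conditional tweezer reprogramming.
+
+        Both values are written as attributes of the device group by generate_code() and read by the
+        BLACS worker, which passes them on to the OccupationReceiver in transition_to_buffered().
+        """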
+        self.function_conditional_programming = function_name
+        self.function_conditional_programming_args = function_args if function_args is not None else ()
+
     def do_checks(self):
         if len(self.child_devices)>1:
             raise NotImplementedError("This code can just handle 1 output channel for now.")
@@ -170,6 +174,9 @@ class SpectrumAWG(Device):
             group.require_group(output.connection)
             group[output.connection].require_group("labels")
 
+            group.attrs["function_conditional_programming"] = getattr(self,"function_conditional_programming","")
+            group.attrs["function_conditional_programming_args"] = getattr(self,"function_conditional_programming_args",tuple())
+
             for i,t in enumerate(np.sort(change_times)):
                 group[output.connection].attrs[str(i)] = output.instructions[t][:-1]
                 if output.instructions[t][-1] is not None: