import glob
import json
import os

import mne
import numpy as np
import pyedflib

from settings.config import settings

from .utils import upsample_events

FINGERMODEL_IDS = settings.FINGERMODEL_IDS
FINGERMODEL_IDS_INVERSE = settings.FINGERMODEL_IDS_INVERSE
CONFIG_INFO = settings.CONFIG_INFO


def raw_loader(data_root, session_paths: dict,
               do_rereference=True,
               upsampled_epoch_length=1.,
               ori_epoch_length=5):
    """Load, preprocess and concatenate the raw recordings of several sessions.

    Params:
        data_root (str): root directory containing the session folders
        session_paths (dict of lists): maps finger model name -> list of session sub-directories
        do_rereference (bool): do common average rereference or not
        upsampled_epoch_length (None or float): epoch length in seconds after upsampling; None: do not do upsampling
        ori_epoch_length (int or 'varied'): original epoch length in seconds; 'varied' keeps per-trial lengths
    """
    raws_loaded = load_sessions(data_root, session_paths, do_rereference)

    # rebuild events so that every trial becomes one fixed-length (or per-trial) epoch
    raws = []
    event_id = {}
    for finger_model, raw in raws_loaded:
        fs = raw.info['sfreq']
        # annotations carry the original numeric event codes as strings
        events, _ = mne.events_from_annotations(
            raw, event_id={d: int(d) for d in np.unique(raw.annotations.description)})
        event_id = event_id | {FINGERMODEL_IDS_INVERSE[int(d)]: int(d)
                               for d in np.unique(raw.annotations.description)}

        if isinstance(ori_epoch_length, (int, float)):
            trial_duration = ori_epoch_length
        elif ori_epoch_length == 'varied':
            trial_duration = None
        else:
            raise ValueError(f'Unsupported epoch_length {ori_epoch_length}')
        events = reconstruct_events(events, fs, trial_duration=trial_duration)
        if upsampled_epoch_length is not None:
            events = upsample_events(events, int(fs * upsampled_epoch_length))

        # write the reconstructed events back onto the raw object as annotations
        event_desc = {e: FINGERMODEL_IDS_INVERSE[e] for e in np.unique(events[:, 2])}
        annotations = mne.annotations_from_events(events, fs, event_desc)
        raw.set_annotations(annotations)
        raws.append(raw)

    raws = mne.concatenate_raws(raws)
    raws.load_data()
    return raws, event_id


def preprocessing(raw, do_rereference=True):
    raw.load_data()
    if do_rereference:
        # common average reference
        raw.set_eeg_reference('average')
    # high-pass filter at 1 Hz
    raw = raw.filter(1, None)
    # notch filter at 50 Hz mains frequency and its harmonics
    raw = raw.notch_filter([50, 100, 150], trans_bandwidth=3, verbose=False)
    return raw


def reconstruct_events(events, fs, trial_duration=5):
    """Reconstruct individual movement trials from the event sequence.

    Args:
        events (np.ndarray): MNE-style event array (onset sample, duration, code)
        fs (float): sampling frequency
        trial_duration (float or None or dict): fixed trial length in seconds;
            None means variable epoch length (taken from the gap to the next trial),
            dict maps event code -> trial length for trials with different durations
    """
    # keep only the first event of each run of identical codes (the trial onsets)
    trials_ind_deduplicated = np.flatnonzero(np.diff(events[:, 2], prepend=0) != 0)
    events_new = events[trials_ind_deduplicated]
    if trial_duration is None:
        # variable length: duration is the distance to the next trial onset
        events_new[:-1, 1] = np.diff(events_new[:, 0])
        events_new[-1, 1] = events[-1, 0] - events_new[-1, 0]
    elif isinstance(trial_duration, dict):
        # per-code durations, given in seconds and converted to samples
        for e in trial_duration.keys():
            events_new[events_new[:, 2] == e, 1] = int(trial_duration[e] * fs)
    else:
        events_new[:, 1] = int(trial_duration * fs)
    return events_new
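
# Illustrative example (not executed): with fs=100 and the default trial_duration=5,
# consecutive identical codes collapse to a single trial onset and every trial gets
# a 500-sample duration:
#   events = np.array([[0, 0, 1], [100, 0, 1], [200, 0, 2]])
#   reconstruct_events(events, fs=100, trial_duration=5)
#   -> array([[  0, 500,   1],
#             [200, 500,   2]])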


def load_sessions(data_root, session_names: dict, do_rereference=True):
    # return raws for different finger models in an interleaved manner
    raw_cnt = sum(len(session_names[k]) for k in session_names)
    raws = []
    i = 0
    while i < raw_cnt:
        for finger_model in session_names.keys():
            try:
                # NOTE: pop mutates the caller's session lists
                s = session_names[finger_model].pop(0)
                i += 1
            except IndexError:
                # no sessions left for this finger model
                continue
            if glob.glob(os.path.join(data_root, s, 'evt.bdf')):
                # neo format
                raw = load_neuracle(os.path.join(data_root, s))
            else:
                # kraken format
                data_file = glob.glob(os.path.join(data_root, s, '*.bdf'))[0]
                raw = mne.io.read_raw_bdf(data_file)
            # preprocess raw
            raw = preprocessing(raw, do_rereference)
            # append to list
            raws.append((finger_model, raw))
    return raws
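
# Hypothetical illustration of the interleaved ordering (session names are made up):
#   session_names = {'cylinder': ['s1', 's2'], 'ball': ['s3']}
#   load_sessions(data_root, session_names)
#   -> [('cylinder', raw_s1), ('ball', raw_s3), ('cylinder', raw_s2)]
# Note that load_sessions pops from the lists, so the caller's dict is emptied.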


def load_neuracle(data_dir, data_type='ecog'):
    """Neuracle file loader.

    :param data_dir: root data dir of one recording session
    :param data_type: channel type passed to mne.create_info (e.g. 'ecog')
    :return: raw (mne.io.RawArray)
    """
    f = {
        'data': os.path.join(data_dir, 'data.bdf'),
        'evt': os.path.join(data_dir, 'evt.bdf'),
        'info': os.path.join(data_dir, 'recordInformation.json')
    }
    # read recording information (start timestamp and sampling rate)
    with open(f['info'], 'r') as json_file:
        record_info = json.load(json_file)
    start_time_point = record_info['DataFileInformations'][0]['BeginTimeStamp']
    sfreq = record_info['SampleRate']

    # read data
    f_data = pyedflib.EdfReader(f['data'])
    ch_names = f_data.getSignalLabels()
    data = np.array([f_data.readSignal(i) for i in range(f_data.signals_in_file)]) * 1e-6  # scale from uV to Volt
    info = mne.create_info(ch_names, sfreq, [data_type] * len(ch_names))
    raw = mne.io.RawArray(data, info)

    # read events
    try:
        f_evt = pyedflib.EdfReader(f['evt'])
        onset, duration, content = f_evt.readAnnotations()
        onset = np.array(onset) - start_time_point * 1e-3  # correct by start time point (BeginTimeStamp is in ms)
        onset = (onset * sfreq).astype(np.int64)
        try:
            content = content.astype(np.int64)  # use original event code
        except ValueError:
            # non-numeric event labels: map each unique label to an integer code
            event_mapping = {c: i for i, c in enumerate(np.unique(content))}
            content = [event_mapping[i] for i in content]
        duration = (np.array(duration) * sfreq).astype(np.int64)
        events = np.stack((onset, duration, content), axis=1)
        annotations = mne.annotations_from_events(events, sfreq)
        raw.set_annotations(annotations)
    except OSError:
        # missing or unreadable event file: return the raw without annotations
        pass
    return raw
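
# Minimal usage sketch (illustrative only): the data root, session folder names and
# finger-model keys below are hypothetical and must match settings.FINGERMODEL_IDS
# of an actual dataset.
#
#   session_paths = {'cylinder': ['subj01/session1'], 'ball': ['subj01/session2']}
#   raw, event_id = raw_loader('/path/to/data_root', session_paths,
#                              do_rereference=True,
#                              upsampled_epoch_length=1.,
#                              ori_epoch_length=5)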