# validation.py
  1. '''
  2. 模型测试脚本,
  3. 测试AUC,
  4. 绘制Confusion matrix, ERSD map
  5. '''
  6. import os
  7. import argparse
  8. import logging
  9. import mne
  10. import yaml
  11. import joblib
  12. import numpy as np
  13. from scipy import signal
  14. from sklearn.metrics import accuracy_score, f1_score
  15. import matplotlib.pyplot as plt
  16. from dataloaders import neo
  17. import bci_core.utils as bci_utils
  18. import bci_core.pipeline as bci_pipeline
  19. import bci_core.viz as bci_viz
  20. from settings.config import settings
  21. logging.basicConfig(level=logging.INFO)
  22. logger = logging.getLogger(__name__)
  23. config_info = settings.CONFIG_INFO
  24. def parse_args():
  25. parser = argparse.ArgumentParser(
  26. description='Model validation'
  27. )
  28. parser.add_argument(
  29. '--subj',
  30. dest='subj',
  31. help='Subject name',
  32. default=None,
  33. type=str
  34. )
  35. parser.add_argument(
  36. '--model-filename',
  37. dest='model_filename',
  38. help='Model filename',
  39. default=None,
  40. type=str
  41. )
  42. return parser.parse_args()
  43. def val_by_epochs(raw, model_path, event_id, trial_duration=1., ):
  44. events, _ = mne.events_from_annotations(raw, event_id=event_id)
  45. # parse model type
  46. models = joblib.load(model_path)
  47. prob, y_pred = bci_pipeline.data_evaluation(models, raw.get_data(), raw.info['sfreq'], events, trial_duration, True)
  48. # metrices: AUC, accuracy,
  49. y = events[:, -1]
  50. auc = bci_utils.multiclass_auc_score(y, prob)
  51. accu = accuracy_score(y, y_pred)
  52. f1 = f1_score(y, y_pred, pos_label=np.max(y), average='macro')
  53. # confusion matrix
  54. fig_conf = bci_viz.plot_confusion_matrix(y, y_pred)
  55. return (auc, accu, f1), fig_conf
  56. if __name__ == '__main__':
  57. args = parse_args()
  58. subj_name = args.subj
  59. data_dir = os.path.join(settings.DATA_PATH, subj_name)
  60. model_path = os.path.join(settings.MODEL_PATH, subj_name, args.model_filename)
  61. with open(os.path.join(data_dir, 'val_info.yml'), 'r') as f:
  62. info = yaml.safe_load(f)
  63. sessions = info['sessions']
  64. ori_epoch_length = info.get('ori_epoch_length', 5.)
  65. upsampled_trial_duration = config_info['buffer_length']
  66. # preprocess raw
  67. raw, event_id = neo.raw_loader(data_dir, sessions,
  68. ori_epoch_length=ori_epoch_length,
  69. reref_method=config_info['reref'],
  70. upsampled_epoch_length=upsampled_trial_duration)
  71. fs = raw.info['sfreq']
  72. events, _ = mne.events_from_annotations(raw, event_id)
  73. # Do validations
  74. metrices, fig_conf = val_by_epochs(raw, model_path, event_id, upsampled_trial_duration)
  75. # log results
  76. logger.info(f'Validation metrices: AUC: {metrices[0]:.4f}, Accuracy: {metrices[1]:.4f}, f1-score: {metrices[2]:.4f}')
  77. fig_conf.savefig(os.path.join(data_dir, 'confusion_matrix.pdf'))
  78. plt.show()