aboutsummaryrefslogtreecommitdiff
path: root/infra/libkookie/nixpkgs/pkgs/development/python-modules/transformers/default.nix
blob: 39fd7a11d0df4076525fa35e87516b2df648915f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
{ buildPythonPackage
, lib
, stdenv  # kept for backward compatibility with existing overrides; lib is used below
, fetchFromGitHub
, boto3
, cookiecutter
, filelock
, regex
, requests
, numpy
, parameterized
, protobuf
, sacremoses
, timeout-decorator
, tokenizers
, tqdm
, pytestCheckHook
}:

buildPythonPackage rec {
  pname = "transformers";
  version = "4.0.0";

  src = fetchFromGitHub {
    owner = "huggingface";
    repo = pname;
    rev = "v${version}";
    sha256 = "17djq32pq8d6vqip7i9pda0ldigmzckbbcd278llmpxdriqd4llg";
  };

  propagatedBuildInputs = [
    cookiecutter
    filelock
    numpy
    protobuf
    regex
    requests
    sacremoses
    tokenizers
    tqdm
  ];

  checkInputs = [
    parameterized
    pytestCheckHook
    timeout-decorator
  ];

  # Relax the exact `tokenizers` version pin in setup.py so the build
  # accepts whichever tokenizers version nixpkgs currently provides.
  postPatch = ''
    substituteInPlace setup.py \
      --replace "tokenizers == 0.9.4" "tokenizers"
  '';

  preCheck = ''
    export HOME="$TMPDIR"

    # These tests require the `datasets` module to download test
    # data. However, since we cannot download in the Nix sandbox
    # and `datasets` is an optional dependency for transformers
    # itself, we just remove the test files that import `datasets`.
    rm tests/test_retrieval_rag.py
    rm tests/test_trainer.py
  '';

  # We have to run from the main directory for the tests. However,
  # letting pytest discover tests leads to errors.
  pytestFlagsArray = [ "tests" ];

  # Disable tests that require network access.
  disabledTests = [
    "BlenderbotSmallTokenizerTest"
    "Blenderbot3BTokenizerTests"
    "GetFromCacheTests"
    "TokenizationTest"
    "TestTokenizationBart"
    "test_all_tokenizers"
    "test_batch_encoding_is_fast"
    "test_batch_encoding_pickle"
    "test_batch_encoding_word_to_tokens"
    "test_config_from_model_shortcut"
    "test_config_model_type_from_model_identifier"
    "test_from_pretrained_use_fast_toggle"
    "test_hf_api"
    "test_outputs_can_be_shorter"
    "test_outputs_not_longer_than_maxlen"
    "test_padding_accepts_tensors"
    "test_pretokenized_tokenizers"
    "test_tokenizer_equivalence_en_de"
    "test_tokenizer_from_model_type"
    "test_tokenizer_from_pretrained"
    "test_tokenizer_from_tokenizer_class"
    "test_tokenizer_identifier_with_correct_config"
    "test_tokenizer_identifier_non_existent"
  ];

  meta = with lib; {
    homepage = "https://github.com/huggingface/transformers";
    description = "State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch";
    changelog = "https://github.com/huggingface/transformers/releases/tag/v${version}";
    license = licenses.asl20;
    platforms = platforms.unix;
    maintainers = with maintainers; [ danieldk pashashocky ];
  };
}