-blockmaxsize has been removed, but some tests were still using that option, so update them to use -blockmaxweight instead.
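For context on the numbers in the diffs below: a block's weight is at most four times its serialized size (and exactly four times when no witness data is involved), so each old -blockmaxsize value maps to a -blockmaxweight value four times as large. A minimal sketch of that conversion; the helper name is made up for illustration, and the old byte values are inferred by dividing the new weights by four rather than taken from the patch:

```python
def size_to_weight(blockmaxsize_bytes):
    """Illustrative helper: map an old -blockmaxsize value (bytes) to the
    equivalent -blockmaxweight value, assuming weight = 4 * size, which
    holds for blocks without witness data."""
    return 4 * blockmaxsize_bytes

assert size_to_weight(17000) == 68000      # fee estimation test, small-block miner
assert size_to_weight(8000) == 32000       # fee estimation test, "stingy" miner
assert size_to_weight(999000) == 3996000   # just under the 4,000,000 weight cap
```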
        
34+
35     def set_test_params(self):
36         self.setup_clean_chain = True
37         self.num_nodes = 1
38-        self.extra_args = [["-maxuploadtarget=800"]]
39+        self.extra_args = [["-maxuploadtarget=800", "-blockmaxweight=3996000"]]
137-                                      ["-maxorphantx=1000"]])
138+                                      ["-blockmaxweight=68000", "-maxorphantx=1000"],
139+                                      ["-blockmaxweight=32000", "-maxorphantx=1000"]])
140         # Use node0 to mine blocks for input splitting
141         # Node1 mines small blocks but that are bigger than the expected transaction rate.
142         # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
-blockmaxweight=3996000 should be removed here; it just restates the default. One other request: update the comments inline as well.
        
      blockmaxsize 😄
        
      142-        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
143-        # (17k is room enough for 110 or so transactions)
144+        # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
145+        # (68k weight is room enough for 120 or so transactions)
146         # Node2 is a stingy miner, that
147         # produces too small blocks (room for only 55 or so transactions)
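On the first review point above: 3996000 is the default -blockmaxweight (MAX_BLOCK_WEIGHT minus the 4,000 weight that CreateNewBlock reserves up front, as the updated comment notes), so passing it explicitly should be a no-op. Assuming that, the maxuploadtarget test can simply keep its original arguments. A sketch of the simplified setup; the class name is assumed here, only the dropped flag matters:

```python
from test_framework.test_framework import BitcoinTestFramework

class MaxUploadTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # No explicit -blockmaxweight: the node mines with the default limit
        # (assumed 3996000), so the flag added in the diff above is redundant.
        self.extra_args = [["-maxuploadtarget=800"]]
```

The same reasoning applies to the pruning hunk below, where the flag is added to full_node_default_args and to nodes 3 and 4.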
@@ -31,14 +31,14 @@ def set_test_params(self):
31 
32         # Create nodes 0 and 1 to mine.
33         # Create node 2 to test pruning.
34-        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ]
35+        self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxweight=3996000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"]
39                            self.full_node_default_args,
40                            ["-maxreceivebuffer=20000", "-prune=550"],
41-                           ["-maxreceivebuffer=20000"],
42-                           ["-maxreceivebuffer=20000"],
43+                           ["-maxreceivebuffer=20000", "-blockmaxweight=3996000"],
44+                           ["-maxreceivebuffer=20000", "-blockmaxweight=3996000"],
Don’t we persist the mempool to disk?
Yes, unless -persistmempool=0 is set.
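On the mempool question: mempool.dat is written at shutdown and read back at startup by default, and -persistmempool=0 disables both sides. A sketch of how a functional test could restart a node without carrying its mempool over; the node index and argument list are illustrative, not taken from this patch:

```python
# Fragment from a run_test() body: restart a node with mempool persistence
# disabled, so mempool.dat is neither saved on shutdown nor loaded on startup.
self.restart_node(0, extra_args=["-persistmempool=0"])
# Right after the restart the mempool starts out empty (peers may of course
# re-relay transactions afterwards).
assert len(self.nodes[0].getrawmempool()) == 0
```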