# export-to-sqlite.py: export perf data to a sqlite3 database
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.

from __future__ import print_function

import os
import sys
import struct
import datetime

# To use this script you will need to have installed the package python-pyside,
# which provides LGPL-licensed Python bindings for Qt.  You will also need the
# package libqt4-sql-sqlite for Qt sqlite3 support.
#
# Examples of installing pyside:
#
# ubuntu:
#
#	$ sudo apt-get install python-pyside.qtsql libqt4-sql-sqlite
#
#	Alternatively, to use Python3 and/or pyside 2, one of the following:
#
#		$ sudo apt-get install python3-pyside.qtsql libqt4-sql-sqlite
#		$ sudo apt-get install python-pyside2.qtsql libqt5sql5-sqlite
#		$ sudo apt-get install python3-pyside2.qtsql libqt5sql5-sqlite
#
# fedora:
#
#	$ sudo yum install python-pyside
#
#	Alternatively, to use Python3 and/or pyside 2, one of the following:
#
#		$ sudo yum install python3-pyside
#		$ pip install --user PySide2
#		$ pip3 install --user PySide2
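#
#	To quickly check that the Qt sqlite driver is usable (assuming pyside 2
#	is installed), the following should list 'QSQLITE':
#
#		$ python3 -c "from PySide2.QtSql import QSqlDatabase; print(QSqlDatabase.drivers())"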
#
# An example of using this script with Intel PT:
#
#	$ perf record -e intel_pt//u ls
#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
#	2017-07-31 14:26:07.326913 Creating database...
#	2017-07-31 14:26:07.538097 Writing records...
#	2017-07-31 14:26:09.889292 Adding indexes
#	2017-07-31 14:26:09.958746 Done
#
# To browse the database, sqlite3 can be used e.g.
#
#	$ sqlite3 pt_example
#	sqlite> .header on
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .mode column
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .tables
#	sqlite> .schema samples_view
#	sqlite> .quit
#
# An example of using the database is provided by the script
# exported-sql-viewer.py.  Refer to that script for details.
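#
# For example, using the install path from the example above:
#
#	$ python3 ~/libexec/perf-core/scripts/python/exported-sql-viewer.py pt_example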
#
# The database structure is practically the same as that created by the script
# export-to-postgresql.py.  Refer to that script for details.  A notable
# difference is the 'transaction' column of the 'samples' table, which is
# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
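#
# For example, to select the renamed column (present when the database was
# created with 'all' columns):
#
#	sqlite> select id, transaction_ from samples limit 10;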

pyside_version_1 = True
if not "pyside-version-1" in sys.argv:
	try:
		from PySide2.QtSql import *
		pyside_version_1 = False
	except:
		pass

if pyside_version_1:
	from PySide.QtSql import *

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *

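# These globals are read by perf's database export engine to decide what to
# export.  'calls' and 'callchains' are enabled below from the command line.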
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False

def printerr(*args, **keyword_args):
	print(*args, file=sys.stderr, **keyword_args)

def printdate(*args, **kw_args):
	print(datetime.datetime.today(), *args, sep=' ', **kw_args)

def usage():
	printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
	printerr("where:  columns            'all' or 'branches'");
	printerr("        calls              'calls' => create calls and call_paths table");
	printerr("        callchains         'callchains' => create call_paths table");
	printerr("        pyside-version-1   'pyside-version-1' => use pyside version 1");
	raise Exception("Too few or bad arguments")

if (len(sys.argv) < 2):
	usage()

dbname = sys.argv[1]

if (len(sys.argv) >= 3):
	columns = sys.argv[2]
else:
	columns = "all"

if columns not in ("all", "branches"):
	usage()

branches = (columns == "branches")

for i in range(3, len(sys.argv)):
	if (sys.argv[i] == "calls"):
		perf_db_export_calls = True
	elif (sys.argv[i] == "callchains"):
		perf_db_export_callchains = True
	elif (sys.argv[i] == "pyside-version-1"):
		pass
	else:
		usage()

def do_query(q, s):
	if (q.exec_(s)):
		return
	raise Exception("Query failed: " + q.lastError().text())

def do_query_(q):
	if (q.exec_()):
		return
	raise Exception("Query failed: " + q.lastError().text())

printdate("Creating database ...")

db_exists = False
try:
	f = open(dbname)
	f.close()
	db_exists = True
except IOError:
	pass

if db_exists:
	raise Exception(dbname + " already exists")

db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(dbname)
db.open()

query = QSqlQuery(db)

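# The database is created from scratch, so the rollback journal is not needed
# and turning it off makes the bulk inserts below much faster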
do_query(query, 'PRAGMA journal_mode = OFF')
do_query(query, 'BEGIN TRANSACTION')

do_query(query, 'CREATE TABLE selected_events ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'name		varchar(80))')
do_query(query, 'CREATE TABLE machines ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'pid		integer,'
		'root_dir 	varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'process_id	bigint,'
		'pid		integer,'
		'tid		integer)')
do_query(query, 'CREATE TABLE comms ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'comm		varchar(16),'
		'c_thread_id	bigint,'
		'c_time		bigint,'
		'exec_flag	boolean)')
do_query(query, 'CREATE TABLE comm_threads ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'comm_id	bigint,'
		'thread_id	bigint)')
do_query(query, 'CREATE TABLE dsos ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'short_name	varchar(256),'
		'long_name	varchar(4096),'
		'build_id	varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'dso_id		bigint,'
		'sym_start	bigint,'
		'sym_end	bigint,'
		'binding	integer,'
		'name		varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'name		varchar(80))')

if branches:
	do_query(query, 'CREATE TABLE samples ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'evsel_id	bigint,'
		'machine_id	bigint,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'dso_id		bigint,'
		'symbol_id	bigint,'
		'sym_offset	bigint,'
		'ip		bigint,'
		'time		bigint,'
		'cpu		integer,'
		'to_dso_id	bigint,'
		'to_symbol_id	bigint,'
		'to_sym_offset	bigint,'
		'to_ip		bigint,'
		'branch_type	integer,'
		'in_tx		boolean,'
		'call_path_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')
else:
	do_query(query, 'CREATE TABLE samples ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'evsel_id	bigint,'
		'machine_id	bigint,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'dso_id		bigint,'
		'symbol_id	bigint,'
		'sym_offset	bigint,'
		'ip		bigint,'
		'time		bigint,'
		'cpu		integer,'
		'to_dso_id	bigint,'
		'to_symbol_id	bigint,'
		'to_sym_offset	bigint,'
		'to_ip		bigint,'
		'period		bigint,'
		'weight		bigint,'
		'transaction_	bigint,'
		'data_src	bigint,'
		'branch_type	integer,'
		'in_tx		boolean,'
		'call_path_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')

if perf_db_export_calls or perf_db_export_callchains:
	do_query(query, 'CREATE TABLE call_paths ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'parent_id	bigint,'
		'symbol_id	bigint,'
		'ip		bigint)')
if perf_db_export_calls:
	do_query(query, 'CREATE TABLE calls ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'call_path_id	bigint,'
		'call_time	bigint,'
		'return_time	bigint,'
		'branch_count	bigint,'
		'call_id	bigint,'
		'return_id	bigint,'
		'parent_call_path_id	bigint,'
		'flags		integer,'
		'parent_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')

do_query(query, 'CREATE TABLE ptwrite ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'payload	bigint,'
		'exact_ip	integer)')

do_query(query, 'CREATE TABLE cbr ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'cbr		integer,'
		'mhz		integer,'
		'percent	integer)')

do_query(query, 'CREATE TABLE mwait ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'hints		integer,'
		'extensions	integer)')

do_query(query, 'CREATE TABLE pwre ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'cstate		integer,'
		'subcstate	integer,'
		'hw		integer)')

do_query(query, 'CREATE TABLE exstop ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'exact_ip	integer)')

do_query(query, 'CREATE TABLE pwrx ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'deepest_cstate	integer,'
		'last_cstate	integer,'
		'wake_reason	integer)')

do_query(query, 'CREATE TABLE context_switches ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'time		bigint,'
		'cpu		integer,'
		'thread_out_id	bigint,'
		'comm_out_id	bigint,'
		'thread_in_id	bigint,'
		'comm_in_id	bigint,'
		'flags		integer)')

# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
try:
	do_query(query, 'SELECT printf("") FROM machines')
	sqlite_has_printf = True
except Exception:
	pass

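# Return an SQL expression that renders column x in hexadecimal, falling back
# to the plain value if this sqlite build lacks printf()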
def emit_to_hex(x):
	if sqlite_has_printf:
		return 'printf("%x", ' + x + ')'
	else:
		return x

do_query(query, 'CREATE VIEW machines_view AS '
	'SELECT '
		'id,'
		'pid,'
		'root_dir,'
		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
	' FROM machines')

do_query(query, 'CREATE VIEW dsos_view AS '
	'SELECT '
		'id,'
		'machine_id,'
		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
		'short_name,'
		'long_name,'
		'build_id'
	' FROM dsos')

do_query(query, 'CREATE VIEW symbols_view AS '
	'SELECT '
		'id,'
		'name,'
		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
		'dso_id,'
		'sym_start,'
		'sym_end,'
		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
	' FROM symbols')

do_query(query, 'CREATE VIEW threads_view AS '
	'SELECT '
		'id,'
		'machine_id,'
		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
		'process_id,'
		'pid,'
		'tid'
	' FROM threads')

do_query(query, 'CREATE VIEW comm_threads_view AS '
	'SELECT '
		'comm_id,'
		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
		'thread_id,'
		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
	' FROM comm_threads')

if perf_db_export_calls or perf_db_export_callchains:
	do_query(query, 'CREATE VIEW call_paths_view AS '
		'SELECT '
			'c.id,'
			+ emit_to_hex('c.ip') + ' AS ip,'
			'c.symbol_id,'
			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
			'c.parent_id,'
			+ emit_to_hex('p.ip') + ' AS parent_ip,'
			'p.symbol_id AS parent_symbol_id,'
			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
	do_query(query, 'CREATE VIEW calls_view AS '
		'SELECT '
			'calls.id,'
			'thread_id,'
			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
			'call_path_id,'
			+ emit_to_hex('ip') + ' AS ip,'
			'symbol_id,'
			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
			'call_time,'
			'return_time,'
			'return_time - call_time AS elapsed_time,'
			'branch_count,'
			'insn_count,'
			'cyc_count,'
			'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
			'call_id,'
			'return_id,'
			'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
			'parent_call_path_id,'
			'calls.parent_id'
		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')

do_query(query, 'CREATE VIEW samples_view AS '
	'SELECT '
		'id,'
		'time,'
		'cpu,'
		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
		+ emit_to_hex('ip') + ' AS ip_hex,'
		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
		'sym_offset,'
		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
		+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
		'to_sym_offset,'
		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
		'in_tx,'
		'insn_count,'
		'cyc_count,'
		'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
	' FROM samples')

do_query(query, 'CREATE VIEW ptwrite_view AS '
	'SELECT '
		'ptwrite.id,'
		'time,'
		'cpu,'
		+ emit_to_hex('payload') + ' AS payload_hex,'
		'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
	' FROM ptwrite'
	' INNER JOIN samples ON samples.id = ptwrite.id')

do_query(query, 'CREATE VIEW cbr_view AS '
	'SELECT '
		'cbr.id,'
		'time,'
		'cpu,'
		'cbr,'
		'mhz,'
		'percent'
	' FROM cbr'
	' INNER JOIN samples ON samples.id = cbr.id')

do_query(query, 'CREATE VIEW mwait_view AS '
	'SELECT '
		'mwait.id,'
		'time,'
		'cpu,'
		+ emit_to_hex('hints') + ' AS hints_hex,'
		+ emit_to_hex('extensions') + ' AS extensions_hex'
	' FROM mwait'
	' INNER JOIN samples ON samples.id = mwait.id')

do_query(query, 'CREATE VIEW pwre_view AS '
	'SELECT '
		'pwre.id,'
		'time,'
		'cpu,'
		'cstate,'
		'subcstate,'
		'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw'
	' FROM pwre'
	' INNER JOIN samples ON samples.id = pwre.id')

do_query(query, 'CREATE VIEW exstop_view AS '
	'SELECT '
		'exstop.id,'
		'time,'
		'cpu,'
		'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
	' FROM exstop'
	' INNER JOIN samples ON samples.id = exstop.id')

do_query(query, 'CREATE VIEW pwrx_view AS '
	'SELECT '
		'pwrx.id,'
		'time,'
		'cpu,'
		'deepest_cstate,'
		'last_cstate,'
		'CASE     WHEN wake_reason=1 THEN \'Interrupt\''
			' WHEN wake_reason=2 THEN \'Timer Deadline\''
			' WHEN wake_reason=4 THEN \'Monitored Address\''
			' WHEN wake_reason=8 THEN \'HW\''
			' ELSE wake_reason '
		'END AS wake_reason'
	' FROM pwrx'
	' INNER JOIN samples ON samples.id = pwrx.id')

do_query(query, 'CREATE VIEW power_events_view AS '
	'SELECT '
		'samples.id,'
		'time,'
		'cpu,'
		'selected_events.name AS event,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,'
		'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,'
		'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,'
		'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT '
			'CASE     WHEN wake_reason=1 THEN \'Interrupt\''
				' WHEN wake_reason=2 THEN \'Timer Deadline\''
				' WHEN wake_reason=4 THEN \'Monitored Address\''
				' WHEN wake_reason=8 THEN \'HW\''
				' ELSE wake_reason '
			'END'
		' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason'
	' FROM samples'
	' INNER JOIN selected_events ON selected_events.id = evsel_id'
	' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')')

do_query(query, 'CREATE VIEW context_switches_view AS '
	'SELECT '
		'context_switches.id,'
		'context_switches.machine_id,'
		'context_switches.time,'
		'context_switches.cpu,'
		'th_out.pid AS pid_out,'
		'th_out.tid AS tid_out,'
		'comm_out.comm AS comm_out,'
		'th_in.pid AS pid_in,'
		'th_in.tid AS tid_in,'
		'comm_in.comm AS comm_in,'
		'CASE	  WHEN context_switches.flags = 0 THEN \'in\''
			' WHEN context_switches.flags = 1 THEN \'out\''
			' WHEN context_switches.flags = 3 THEN \'out preempt\''
			' ELSE context_switches.flags '
		'END AS flags'
	' FROM context_switches'
	' INNER JOIN threads AS th_out ON th_out.id   = context_switches.thread_out_id'
	' INNER JOIN threads AS th_in  ON th_in.id    = context_switches.thread_in_id'
	' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
	' INNER JOIN comms AS comm_in  ON comm_in.id  = context_switches.comm_in_id')

do_query(query, 'END TRANSACTION')

evsel_query = QSqlQuery(db)
evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
machine_query = QSqlQuery(db)
machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
thread_query = QSqlQuery(db)
thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
comm_query = QSqlQuery(db)
comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)")
comm_thread_query = QSqlQuery(db)
comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
dso_query = QSqlQuery(db)
dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
symbol_query = QSqlQuery(db)
symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
branch_type_query = QSqlQuery(db)
branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
sample_query = QSqlQuery(db)
if branches:
	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
else:
	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
if perf_db_export_calls or perf_db_export_callchains:
	call_path_query = QSqlQuery(db)
	call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
	call_query = QSqlQuery(db)
	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
ptwrite_query = QSqlQuery(db)
ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)")
cbr_query = QSqlQuery(db)
cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)")
mwait_query = QSqlQuery(db)
mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)")
pwre_query = QSqlQuery(db)
pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)")
exstop_query = QSqlQuery(db)
exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)")
pwrx_query = QSqlQuery(db)
pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)")
context_switch_query = QSqlQuery(db)
context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)")

def trace_begin():
	printdate("Writing records...")
	do_query(query, 'BEGIN TRANSACTION')
	# id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
	evsel_table(0, "unknown")
	machine_table(0, 0, "unknown")
	thread_table(0, 0, 0, -1, -1)
	comm_table(0, "unknown", 0, 0, 0)
	dso_table(0, 0, "unknown", "unknown", "")
	symbol_table(0, 0, 0, 0, 0, "unknown")
	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
	if perf_db_export_calls or perf_db_export_callchains:
		call_path_table(0, 0, 0, 0)
		call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

unhandled_count = 0

def is_table_empty(table_name):
	do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1')
	if query.next():
		return False
	return True

def drop(table_name):
	do_query(query, 'DROP VIEW ' + table_name + '_view')
	do_query(query, 'DROP TABLE ' + table_name)

def trace_end():
	do_query(query, 'END TRANSACTION')

	printdate("Adding indexes")
	if perf_db_export_calls:
		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
		do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
		do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
		do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')

	printdate("Dropping unused tables")
	if is_table_empty("ptwrite"):
		drop("ptwrite")
	if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
		do_query(query, 'DROP VIEW power_events_view')
		drop("mwait")
		drop("pwre")
		drop("exstop")
		drop("pwrx")
		if is_table_empty("cbr"):
			drop("cbr")
	if is_table_empty("context_switches"):
		drop("context_switches")

	if (unhandled_count):
		printdate("Warning: ", unhandled_count, " unhandled events")
	printdate("Done")

def trace_unhandled(event_name, context, event_fields_dict):
	global unhandled_count
	unhandled_count += 1

def sched__sched_switch(*x):
	pass

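# Bind the first n values of x to the prepared query q and execute it.  Values
# are bound as strings; sqlite converts them according to column affinity.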
def bind_exec(q, n, x):
	for xx in x[0:n]:
		q.addBindValue(str(xx))
	do_query_(q)

def evsel_table(*x):
	bind_exec(evsel_query, 2, x)

def machine_table(*x):
	bind_exec(machine_query, 3, x)

def thread_table(*x):
	bind_exec(thread_query, 5, x)

def comm_table(*x):
	bind_exec(comm_query, 5, x)

def comm_thread_table(*x):
	bind_exec(comm_thread_query, 3, x)

def dso_table(*x):
	bind_exec(dso_query, 5, x)

def symbol_table(*x):
	bind_exec(symbol_query, 6, x)

def branch_type_table(*x):
	bind_exec(branch_type_query, 2, x)

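# The branches samples table has no period, weight, transaction_ or data_src
# columns, so positions 15-18 of the sample data are skipped in that case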
def sample_table(*x):
	if branches:
		for xx in x[0:15]:
			sample_query.addBindValue(str(xx))
		for xx in x[19:24]:
			sample_query.addBindValue(str(xx))
		do_query_(sample_query)
	else:
		bind_exec(sample_query, 24, x)

def call_path_table(*x):
	bind_exec(call_path_query, 4, x)

def call_return_table(*x):
	bind_exec(call_query, 14, x)

def ptwrite(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	flags = data[0]
	payload = data[1]
	exact_ip = flags & 1
	ptwrite_query.addBindValue(str(id))
	ptwrite_query.addBindValue(str(payload))
	ptwrite_query.addBindValue(str(exact_ip))
	do_query_(ptwrite_query)

def cbr(id, raw_buf):
	data = struct.unpack_from("<BBBBII", raw_buf)
	cbr = data[0]
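	# The +500 and +5 offsets implement round-to-nearest; '//' keeps the
	# results integral under Python 3 as well as Python 2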
	MHz = (data[4] + 500) // 1000
	percent = ((cbr * 1000 // data[2]) + 5) // 10
	cbr_query.addBindValue(str(id))
	cbr_query.addBindValue(str(cbr))
	cbr_query.addBindValue(str(MHz))
	cbr_query.addBindValue(str(percent))
	do_query_(cbr_query)

def mwait(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hints = payload & 0xff
	extensions = (payload >> 32) & 0x3
	mwait_query.addBindValue(str(id))
	mwait_query.addBindValue(str(hints))
	mwait_query.addBindValue(str(extensions))
	do_query_(mwait_query)

def pwre(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hw = (payload >> 7) & 1
	cstate = (payload >> 12) & 0xf
	subcstate = (payload >> 8) & 0xf
	pwre_query.addBindValue(str(id))
	pwre_query.addBindValue(str(cstate))
	pwre_query.addBindValue(str(subcstate))
	pwre_query.addBindValue(str(hw))
	do_query_(pwre_query)

def exstop(id, raw_buf):
	data = struct.unpack_from("<I", raw_buf)
	flags = data[0]
	exact_ip = flags & 1
	exstop_query.addBindValue(str(id))
	exstop_query.addBindValue(str(exact_ip))
	do_query_(exstop_query)

def pwrx(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	deepest_cstate = payload & 0xf
	last_cstate = (payload >> 4) & 0xf
	wake_reason = (payload >> 8) & 0xf
	pwrx_query.addBindValue(str(id))
	pwrx_query.addBindValue(str(deepest_cstate))
	pwrx_query.addBindValue(str(last_cstate))
	pwrx_query.addBindValue(str(wake_reason))
	do_query_(pwrx_query)

def synth_data(id, config, raw_buf, *x):
	if config == 0:
		ptwrite(id, raw_buf)
	elif config == 1:
		mwait(id, raw_buf)
	elif config == 2:
		pwre(id, raw_buf)
	elif config == 3:
		exstop(id, raw_buf)
	elif config == 4:
		pwrx(id, raw_buf)
	elif config == 5:
		cbr(id, raw_buf)

def context_switch_table(*x):
	bind_exec(context_switch_query, 9, x)
an>v_l1_size; static int v_l1_shift; static int v_l2_levels; /* The bottom level has pointers to PageDesc, and is indexed by * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. */ #define V_L1_MIN_BITS 4 #define V_L1_MAX_BITS (V_L2_BITS + 3) #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) static void *l1_map[V_L1_MAX_SIZE]; /* code generation context */ TCGContext tcg_init_ctx; __thread TCGContext *tcg_ctx; TBContext tb_ctx; bool parallel_cpus; static void page_table_config_init(void) { uint32_t v_l1_bits; assert(TARGET_PAGE_BITS); /* The bits remaining after N lower levels of page tables. */ v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; if (v_l1_bits < V_L1_MIN_BITS) { v_l1_bits += V_L2_BITS; } v_l1_size = 1 << v_l1_bits; v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; v_l2_levels = v_l1_shift / V_L2_BITS - 1; assert(v_l1_bits <= V_L1_MAX_BITS); assert(v_l1_shift % V_L2_BITS == 0); assert(v_l2_levels >= 0); } static void cpu_gen_init(void) { tcg_context_init(&tcg_init_ctx); } /* Encode VAL as a signed leb128 sequence at P. Return P incremented past the encoded value. */ static uint8_t *encode_sleb128(uint8_t *p, target_long val) { int more, byte; do { byte = val & 0x7f; val >>= 7; more = !((val == 0 && (byte & 0x40) == 0) || (val == -1 && (byte & 0x40) != 0)); if (more) { byte |= 0x80; } *p++ = byte; } while (more); return p; } /* Decode a signed leb128 sequence at *PP; increment *PP past the decoded value. Return the decoded value. */ static target_long decode_sleb128(const uint8_t **pp) { const uint8_t *p = *pp; target_long val = 0; int byte, shift = 0; do { byte = *p++; val |= (target_ulong)(byte & 0x7f) << shift; shift += 7; } while (byte & 0x80); if (shift < TARGET_LONG_BITS && (byte & 0x40)) { val |= -(target_ulong)1 << shift; } *pp = p; return val; } /* Encode the data collected about the instructions while compiling TB. Place the data at BLOCK, and return the number of bytes consumed. The logical table consists of TARGET_INSN_START_WORDS target_ulong's, which come from the target's insn_start data, followed by a uintptr_t which comes from the host pc of the end of the code implementing the insn. Each line of the table is encoded as sleb128 deltas from the previous line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. That is, the first column is seeded with the guest pc, the last column with the host pc, and the middle columns with zeros. */ static int encode_search(TranslationBlock *tb, uint8_t *block) { uint8_t *highwater = tcg_ctx->code_gen_highwater; uint8_t *p = block; int i, j, n; for (i = 0, n = tb->icount; i < n; ++i) { target_ulong prev; for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { if (i == 0) { prev = (j == 0 ? tb->pc : 0); } else { prev = tcg_ctx->gen_insn_data[i - 1][j]; } p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); } prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); /* Test for (pending) buffer overflow. The assumption is that any one row beginning below the high water mark cannot overrun the buffer completely. Thus we can test for overflow after encoding a row without having to check during encoding. */ if (unlikely(p > highwater)) { return -1; } } return p - block; } /* The cpu state corresponding to 'searched_pc' is restored. * When reset_icount is true, current TB will be interrupted and * icount should be recalculated. 
*/ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, uintptr_t searched_pc, bool reset_icount) { target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; uintptr_t host_pc = (uintptr_t)tb->tc.ptr; CPUArchState *env = cpu->env_ptr; const uint8_t *p = tb->tc.ptr + tb->tc.size; int i, j, num_insns = tb->icount; #ifdef CONFIG_PROFILER TCGProfile *prof = &tcg_ctx->prof; int64_t ti = profile_getclock(); #endif searched_pc -= GETPC_ADJ; if (searched_pc < host_pc) { return -1; } /* Reconstruct the stored insn data while looking for the point at which the end of the insn exceeds the searched_pc. */ for (i = 0; i < num_insns; ++i) { for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { data[j] += decode_sleb128(&p); } host_pc += decode_sleb128(&p); if (host_pc > searched_pc) { goto found; } } return -1; found: if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { assert(icount_enabled()); /* Reset the cycle counter to the start of the block and shift if to the number of actually executed instructions */ cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; } restore_state_to_opc(env, tb, data); #ifdef CONFIG_PROFILER qatomic_set(&prof->restore_time, prof->restore_time + profile_getclock() - ti); qatomic_set(&prof->restore_count, prof->restore_count + 1); #endif return 0; } void tb_destroy(TranslationBlock *tb) { qemu_spin_destroy(&tb->jmp_lock); } bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) { /* * The host_pc has to be in the rx region of the code buffer. * If it is not we will not be able to resolve it here. * The two cases where host_pc will not be correct are: * * - fault during translation (instruction fetch) * - fault from helper (not using GETPC() macro) * * Either way we need return early as we can't resolve it here. */ if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { TranslationBlock *tb = tcg_tb_lookup(host_pc); if (tb) { cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); if (tb_cflags(tb) & CF_NOCACHE) { /* one-shot translation, invalidate it immediately */ tb_phys_invalidate(tb, -1); tcg_tb_remove(tb); tb_destroy(tb); } return true; } } return false; } static void page_init(void) { page_size_init(); page_table_config_init(); #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) { #ifdef HAVE_KINFO_GETVMMAP struct kinfo_vmentry *freep; int i, cnt; freep = kinfo_getvmmap(getpid(), &cnt); if (freep) { mmap_lock(); for (i = 0; i < cnt; i++) { unsigned long startaddr, endaddr; startaddr = freep[i].kve_start; endaddr = freep[i].kve_end; if (h2g_valid(startaddr)) { startaddr = h2g(startaddr) & TARGET_PAGE_MASK; if (h2g_valid(endaddr)) { endaddr = h2g(endaddr); page_set_flags(startaddr, endaddr, PAGE_RESERVED); } else { #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS endaddr = ~0ul; page_set_flags(startaddr, endaddr, PAGE_RESERVED); #endif } } } free(freep); mmap_unlock(); } #else FILE *f; last_brk = (unsigned long)sbrk(0); f = fopen("/compat/linux/proc/self/maps", "r"); if (f) { mmap_lock(); do { unsigned long startaddr, endaddr; int n; n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); if (n == 2 && h2g_valid(startaddr)) { startaddr = h2g(startaddr) & TARGET_PAGE_MASK; if (h2g_valid(endaddr)) { endaddr = h2g(endaddr); } else { endaddr = ~0ul; } page_set_flags(startaddr, endaddr, PAGE_RESERVED); } } while (!feof(f)); fclose(f); mmap_unlock(); } #endif } #endif } static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) { PageDesc *pd; void **lp; int i; /* Level 1. Always allocated. 
*/ lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); /* Level 2..N-1. */ for (i = v_l2_levels; i > 0; i--) { void **p = qatomic_rcu_read(lp); if (p == NULL) { void *existing; if (!alloc) { return NULL; } p = g_new0(void *, V_L2_SIZE); existing = qatomic_cmpxchg(lp, NULL, p); if (unlikely(existing)) { g_free(p); p = existing; } } lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); } pd = qatomic_rcu_read(lp); if (pd == NULL) { void *existing; if (!alloc) { return NULL; } pd = g_new0(PageDesc, V_L2_SIZE); #ifndef CONFIG_USER_ONLY { int i; for (i = 0; i < V_L2_SIZE; i++) { qemu_spin_init(&pd[i].lock); } } #endif existing = qatomic_cmpxchg(lp, NULL, pd); if (unlikely(existing)) { #ifndef CONFIG_USER_ONLY { int i; for (i = 0; i < V_L2_SIZE; i++) { qemu_spin_destroy(&pd[i].lock); } } #endif g_free(pd); pd = existing; } } return pd + (index & (V_L2_SIZE - 1)); } static inline PageDesc *page_find(tb_page_addr_t index) { return page_find_alloc(index, 0); } static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); /* In user-mode page locks aren't used; mmap_lock is enough */ #ifdef CONFIG_USER_ONLY #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) static inline void page_lock(PageDesc *pd) { } static inline void page_unlock(PageDesc *pd) { } static inline void page_lock_tb(const TranslationBlock *tb) { } static inline void page_unlock_tb(const TranslationBlock *tb) { } struct page_collection * page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) { return NULL; } void page_collection_unlock(struct page_collection *set) { } #else /* !CONFIG_USER_ONLY */ #ifdef CONFIG_DEBUG_TCG static __thread GHashTable *ht_pages_locked_debug; static void ht_pages_locked_debug_init(void) { if (ht_pages_locked_debug) { return; } ht_pages_locked_debug = g_hash_table_new(NULL, NULL); } static bool page_is_locked(const PageDesc *pd) { PageDesc *found; ht_pages_locked_debug_init(); found = g_hash_table_lookup(ht_pages_locked_debug, pd); return !!found; } static void page_lock__debug(PageDesc *pd) { ht_pages_locked_debug_init(); g_assert(!page_is_locked(pd)); g_hash_table_insert(ht_pages_locked_debug, pd, pd); } static void page_unlock__debug(const PageDesc *pd) { bool removed; ht_pages_locked_debug_init(); g_assert(page_is_locked(pd)); removed = g_hash_table_remove(ht_pages_locked_debug, pd); g_assert(removed); } static void do_assert_page_locked(const PageDesc *pd, const char *file, int line) { if (unlikely(!page_is_locked(pd))) { error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", pd, file, line); abort(); } } #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) void assert_no_pages_locked(void) { ht_pages_locked_debug_init(); g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); } #else /* !CONFIG_DEBUG_TCG */ #define assert_page_locked(pd) static inline void page_lock__debug(const PageDesc *pd) { } static inline void page_unlock__debug(const PageDesc *pd) { } #endif /* CONFIG_DEBUG_TCG */ static inline void page_lock(PageDesc *pd) { page_lock__debug(pd); qemu_spin_lock(&pd->lock); } static inline void page_unlock(PageDesc *pd) { qemu_spin_unlock(&pd->lock); page_unlock__debug(pd); } /* lock the page(s) of a TB in the correct acquisition order */ static inline void page_lock_tb(const TranslationBlock *tb) { page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); } static inline void page_unlock_tb(const TranslationBlock *tb) { PageDesc *p1 = page_find(tb->page_addr[0] 
>> TARGET_PAGE_BITS); page_unlock(p1); if (unlikely(tb->page_addr[1] != -1)) { PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); if (p2 != p1) { page_unlock(p2); } } } static inline struct page_entry * page_entry_new(PageDesc *pd, tb_page_addr_t index) { struct page_entry *pe = g_malloc(sizeof(*pe)); pe->index = index; pe->pd = pd; pe->locked = false; return pe; } static void page_entry_destroy(gpointer p) { struct page_entry *pe = p; g_assert(pe->locked); page_unlock(pe->pd); g_free(pe); } /* returns false on success */ static bool page_entry_trylock(struct page_entry *pe) { bool busy; busy = qemu_spin_trylock(&pe->pd->lock); if (!busy) { g_assert(!pe->locked); pe->locked = true; page_lock__debug(pe->pd); } return busy; } static void do_page_entry_lock(struct page_entry *pe) { page_lock(pe->pd); g_assert(!pe->locked); pe->locked = true; } static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) { struct page_entry *pe = value; do_page_entry_lock(pe); return FALSE; } static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) { struct page_entry *pe = value; if (pe->locked) { pe->locked = false; page_unlock(pe->pd); } return FALSE; } /* * Trylock a page, and if successful, add the page to a collection. * Returns true ("busy") if the page could not be locked; false otherwise. */ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) { tb_page_addr_t index = addr >> TARGET_PAGE_BITS; struct page_entry *pe; PageDesc *pd; pe = g_tree_lookup(set->tree, &index); if (pe) { return false; } pd = page_find(index); if (pd == NULL) { return false; } pe = page_entry_new(pd, index); g_tree_insert(set->tree, &pe->index, pe); /* * If this is either (1) the first insertion or (2) a page whose index * is higher than any other so far, just lock the page and move on. */ if (set->max == NULL || pe->index > set->max->index) { set->max = pe; do_page_entry_lock(pe); return false; } /* * Try to acquire out-of-order lock; if busy, return busy so that we acquire * locks in order. */ return page_entry_trylock(pe); } static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) { tb_page_addr_t a = *(const tb_page_addr_t *)ap; tb_page_addr_t b = *(const tb_page_addr_t *)bp; if (a == b) { return 0; } else if (a < b) { return -1; } return 1; } /* * Lock a range of pages ([@start,@end[) as well as the pages of all * intersecting TBs. * Locking order: acquire locks in ascending order of page index. 
*/ struct page_collection * page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) { struct page_collection *set = g_malloc(sizeof(*set)); tb_page_addr_t index; PageDesc *pd; start >>= TARGET_PAGE_BITS; end >>= TARGET_PAGE_BITS; g_assert(start <= end); set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, page_entry_destroy); set->max = NULL; assert_no_pages_locked(); retry: g_tree_foreach(set->tree, page_entry_lock, NULL); for (index = start; index <= end; index++) { TranslationBlock *tb; int n; pd = page_find(index); if (pd == NULL) { continue; } if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } assert_page_locked(pd); PAGE_FOR_EACH_TB(pd, tb, n) { if (page_trylock_add(set, tb->page_addr[0]) || (tb->page_addr[1] != -1 && page_trylock_add(set, tb->page_addr[1]))) { /* drop all locks, and reacquire in order */ g_tree_foreach(set->tree, page_entry_unlock, NULL); goto retry; } } } return set; } void page_collection_unlock(struct page_collection *set) { /* entries are unlocked and freed via page_entry_destroy */ g_tree_destroy(set->tree); g_free(set); } #endif /* !CONFIG_USER_ONLY */ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) { PageDesc *p1, *p2; tb_page_addr_t page1; tb_page_addr_t page2; assert_memory_lock(); g_assert(phys1 != -1); page1 = phys1 >> TARGET_PAGE_BITS; page2 = phys2 >> TARGET_PAGE_BITS; p1 = page_find_alloc(page1, alloc); if (ret_p1) { *ret_p1 = p1; } if (likely(phys2 == -1)) { page_lock(p1); return; } else if (page1 == page2) { page_lock(p1); if (ret_p2) { *ret_p2 = p1; } return; } p2 = page_find_alloc(page2, alloc); if (ret_p2) { *ret_p2 = p2; } if (page1 < page2) { page_lock(p1); page_lock(p2); } else { page_lock(p2); page_lock(p1); } } /* Minimum size of the code gen buffer. This number is randomly chosen, but not so small that we can't have a fair number of TB's live. */ #define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB) /* Maximum size of the code gen buffer we'd like to use. Unless otherwise indicated, this is constrained by the range of direct branches on the host cpu, as used by the TCG implementation of goto_tb. */ #if defined(__x86_64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__sparc__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__powerpc64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__powerpc__) # define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB) #elif defined(__aarch64__) # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) #elif defined(__s390x__) /* We have a +- 4GB range on the branches; leave some slop. */ # define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) #elif defined(__mips__) /* We have a 256MB branch region, but leave room to make sure the main executable is also within that region. */ # define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) #else # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) #endif #if TCG_TARGET_REG_BITS == 32 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB) #ifdef CONFIG_USER_ONLY /* * For user mode on smaller 32 bit systems we may run into trouble * allocating big chunks of data in the right place. On these systems * we utilise a static code generation buffer directly in the binary. 
*/ #define USE_STATIC_CODE_GEN_BUFFER #endif #else /* TCG_TARGET_REG_BITS == 64 */ #ifdef CONFIG_USER_ONLY /* * As user-mode emulation typically means running multiple instances * of the translator don't go too nuts with our default code gen * buffer lest we make things too hard for the OS. */ #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB) #else /* * We expect most system emulation to run one or two guests per host. * Users running large scale system emulation may want to tweak their * runtime setup via the tb-size control on the command line. */ #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB) #endif #endif #define DEFAULT_CODE_GEN_BUFFER_SIZE \ (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) static size_t size_code_gen_buffer(size_t tb_size) { /* Size the buffer. */ if (tb_size == 0) { size_t phys_mem = qemu_get_host_physmem(); if (phys_mem == 0) { tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; } else { tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8); } } if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { tb_size = MIN_CODE_GEN_BUFFER_SIZE; } if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { tb_size = MAX_CODE_GEN_BUFFER_SIZE; } return tb_size; } #ifdef __mips__ /* In order to use J and JAL within the code_gen_buffer, we require that the buffer not cross a 256MB boundary. */ static inline bool cross_256mb(void *addr, size_t size) { return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; } /* We weren't able to allocate a buffer without crossing that boundary, so make do with the larger portion of the buffer that doesn't cross. Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ static inline void *split_cross_256mb(void *buf1, size_t size1) { void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); size_t size2 = buf1 + size1 - buf2; size1 = buf2 - buf1; if (size1 < size2) { size1 = size2; buf1 = buf2; } tcg_ctx->code_gen_buffer_size = size1; return buf1; } #endif #ifdef USE_STATIC_CODE_GEN_BUFFER static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] __attribute__((aligned(CODE_GEN_ALIGN))); static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp) { void *buf, *end; size_t size; if (splitwx > 0) { error_setg(errp, "jit split-wx not supported"); return false; } /* page-align the beginning and end of the buffer */ buf = static_code_gen_buffer; end = static_code_gen_buffer + sizeof(static_code_gen_buffer); buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size); end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size); size = end - buf; /* Honor a command-line option limiting the size of the buffer. 
*/ if (size > tb_size) { size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size); } tcg_ctx->code_gen_buffer_size = size; #ifdef __mips__ if (cross_256mb(buf, size)) { buf = split_cross_256mb(buf, size); size = tcg_ctx->code_gen_buffer_size; } #endif if (qemu_mprotect_rwx(buf, size)) { error_setg_errno(errp, errno, "mprotect of jit buffer"); return false; } qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); tcg_ctx->code_gen_buffer = buf; return true; } #elif defined(_WIN32) static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) { void *buf; if (splitwx > 0) { error_setg(errp, "jit split-wx not supported"); return false; } buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE); if (buf == NULL) { error_setg_win32(errp, GetLastError(), "allocate %zu bytes for jit buffer", size); return false; } tcg_ctx->code_gen_buffer = buf; tcg_ctx->code_gen_buffer_size = size; return true; } #else static bool alloc_code_gen_buffer_anon(size_t size, int prot, int flags, Error **errp) { void *buf; buf = mmap(NULL, size, prot, flags, -1, 0); if (buf == MAP_FAILED) { error_setg_errno(errp, errno, "allocate %zu bytes for jit buffer", size); return false; } tcg_ctx->code_gen_buffer_size = size; #ifdef __mips__ if (cross_256mb(buf, size)) { /* * Try again, with the original still mapped, to avoid re-acquiring * the same 256mb crossing. */ size_t size2; void *buf2 = mmap(NULL, size, prot, flags, -1, 0); switch ((int)(buf2 != MAP_FAILED)) { case 1: if (!cross_256mb(buf2, size)) { /* Success! Use the new buffer. */ munmap(buf, size); break; } /* Failure. Work with what we had. */ munmap(buf2, size); /* fallthru */ default: /* Split the original buffer. Free the smaller half. */ buf2 = split_cross_256mb(buf, size); size2 = tcg_ctx->code_gen_buffer_size; if (buf == buf2) { munmap(buf + size2, size - size2); } else { munmap(buf, size - size2); } size = size2; break; } buf = buf2; } #endif /* Request large pages for the buffer. */ qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); tcg_ctx->code_gen_buffer = buf; return true; } #ifndef CONFIG_TCG_INTERPRETER #ifdef CONFIG_POSIX #include "qemu/memfd.h" static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) { void *buf_rw = NULL, *buf_rx = MAP_FAILED; int fd = -1; #ifdef __mips__ /* Find space for the RX mapping, vs the 256MiB regions. */ if (!alloc_code_gen_buffer_anon(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, errp)) { return false; } /* The size of the mapping may have been adjusted. */ size = tcg_ctx->code_gen_buffer_size; buf_rx = tcg_ctx->code_gen_buffer; #endif buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp); if (buf_rw == NULL) { goto fail; } #ifdef __mips__ void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC, MAP_SHARED | MAP_FIXED, fd, 0); if (tmp != buf_rx) { goto fail_rx; } #else buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0); if (buf_rx == MAP_FAILED) { goto fail_rx; } #endif close(fd); tcg_ctx->code_gen_buffer = buf_rw; tcg_ctx->code_gen_buffer_size = size; tcg_splitwx_diff = buf_rx - buf_rw; /* Request large pages for the buffer and the splitwx. 
*/ qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE); qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE); return true; fail_rx: error_setg_errno(errp, errno, "failed to map shared memory for execute"); fail: if (buf_rx != MAP_FAILED) { munmap(buf_rx, size); } if (buf_rw) { munmap(buf_rw, size); } if (fd >= 0) { close(fd); } return false; } #endif /* CONFIG_POSIX */ #ifdef CONFIG_DARWIN #include <mach/mach.h> extern kern_return_t mach_vm_remap(vm_map_t target_task, mach_vm_address_t *target_address, mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src_task, mach_vm_address_t src_address, boolean_t copy, vm_prot_t *cur_protection, vm_prot_t *max_protection, vm_inherit_t inheritance); static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp) { kern_return_t ret; mach_vm_address_t buf_rw, buf_rx; vm_prot_t cur_prot, max_prot; /* Map the read-write portion via normal anon memory. */ if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, errp)) { return false; } buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer; buf_rx = 0; ret = mach_vm_remap(mach_task_self(), &buf_rx, size, 0, VM_FLAGS_ANYWHERE, mach_task_self(), buf_rw, false, &cur_prot, &max_prot, VM_INHERIT_NONE); if (ret != KERN_SUCCESS) { /* TODO: Convert "ret" to a human readable error message. */ error_setg(errp, "vm_remap for jit splitwx failed"); munmap((void *)buf_rw, size); return false; } if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) { error_setg_errno(errp, errno, "mprotect for jit splitwx"); munmap((void *)buf_rx, size); munmap((void *)buf_rw, size); return false; } tcg_splitwx_diff = buf_rx - buf_rw; return true; } #endif /* CONFIG_DARWIN */ #endif /* CONFIG_TCG_INTERPRETER */ static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp) { #ifndef CONFIG_TCG_INTERPRETER # ifdef CONFIG_DARWIN return alloc_code_gen_buffer_splitwx_vmremap(size, errp); # endif # ifdef CONFIG_POSIX return alloc_code_gen_buffer_splitwx_memfd(size, errp); # endif #endif error_setg(errp, "jit split-wx not supported"); return false; } static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) { ERRP_GUARD(); int prot, flags; if (splitwx) { if (alloc_code_gen_buffer_splitwx(size, errp)) { return true; } /* * If splitwx force-on (1), fail; * if splitwx default-on (-1), fall through to splitwx off. */ if (splitwx > 0) { return false; } error_free_or_abort(errp); } prot = PROT_READ | PROT_WRITE | PROT_EXEC; flags = MAP_PRIVATE | MAP_ANONYMOUS; #ifdef CONFIG_TCG_INTERPRETER /* The tcg interpreter does not need execute permission. */ prot = PROT_READ | PROT_WRITE; #elif defined(CONFIG_DARWIN) /* Applicable to both iOS and macOS (Apple Silicon). */ if (!splitwx) { flags |= MAP_JIT; } #endif return alloc_code_gen_buffer_anon(size, prot, flags, errp); } #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ static bool tb_cmp(const void *ap, const void *bp) { const TranslationBlock *a = ap; const TranslationBlock *b = bp; return a->pc == b->pc && a->cs_base == b->cs_base && a->flags == b->flags && (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && a->trace_vcpu_dstate == b->trace_vcpu_dstate && a->page_addr[0] == b->page_addr[0] && a->page_addr[1] == b->page_addr[1]; } static void tb_htable_init(void) { unsigned int mode = QHT_MODE_AUTO_RESIZE; qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); } /* Must be called before using the QEMU cpus. 'tb_size' is the size (in bytes) allocated to the translation buffer. Zero means default size. 
*/ void tcg_exec_init(unsigned long tb_size, int splitwx) { bool ok; tcg_allowed = true; cpu_gen_init(); page_init(); tb_htable_init(); ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size), splitwx, &error_fatal); assert(ok); #if defined(CONFIG_SOFTMMU) /* There's no guest base to take into account, so go ahead and initialize the prologue now. */ tcg_prologue_init(tcg_ctx); #endif } /* call with @p->lock held */ static inline void invalidate_page_bitmap(PageDesc *p) { assert_page_locked(p); #ifdef CONFIG_SOFTMMU g_free(p->code_bitmap); p->code_bitmap = NULL; p->code_write_count = 0; #endif } /* Set to NULL all the 'first_tb' fields in all PageDescs. */ static void page_flush_tb_1(int level, void **lp) { int i; if (*lp == NULL) { return; } if (level == 0) { PageDesc *pd = *lp; for (i = 0; i < V_L2_SIZE; ++i) { page_lock(&pd[i]); pd[i].first_tb = (uintptr_t)NULL; invalidate_page_bitmap(pd + i); page_unlock(&pd[i]); } } else { void **pp = *lp; for (i = 0; i < V_L2_SIZE; ++i) { page_flush_tb_1(level - 1, pp + i); } } } static void page_flush_tb(void) { int i, l1_sz = v_l1_size; for (i = 0; i < l1_sz; i++) { page_flush_tb_1(v_l2_levels, l1_map + i); } } static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) { const TranslationBlock *tb = value; size_t *size = data; *size += tb->tc.size; return false; } /* flush all the translation blocks */ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) { bool did_flush = false; mmap_lock(); /* If it is already been done on request of another CPU, * just retry. */ if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { goto done; } did_flush = true; if (DEBUG_TB_FLUSH_GATE) { size_t nb_tbs = tcg_nb_tbs(); size_t host_size = 0; tcg_tb_foreach(tb_host_size_iter, &host_size); printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0); } CPU_FOREACH(cpu) { cpu_tb_jmp_cache_clear(cpu); } qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); page_flush_tb(); tcg_region_reset_all(); /* XXX: flush processor icache at this point if cache flush is expensive */ qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); done: mmap_unlock(); if (did_flush) { qemu_plugin_flush_cb(); } } void tb_flush(CPUState *cpu) { if (tcg_enabled()) { unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); if (cpu_in_exclusive_context(cpu)) { do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); } else { async_safe_run_on_cpu(cpu, do_tb_flush, RUN_ON_CPU_HOST_INT(tb_flush_count)); } } } /* * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, * so in order to prevent bit rot we compile them unconditionally in user-mode, * and let the optimizer get rid of them by wrapping their user-only callers * with if (DEBUG_TB_CHECK_GATE). */ #ifdef CONFIG_USER_ONLY static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) { TranslationBlock *tb = p; target_ulong addr = *(target_ulong *)userp; if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { printf("ERROR invalidate: address=" TARGET_FMT_lx " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); } } /* verify that all the pages have correct rights for code * * Called with mmap_lock held. 
*/ static void tb_invalidate_check(target_ulong address) { address &= TARGET_PAGE_MASK; qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); } static void do_tb_page_check(void *p, uint32_t hash, void *userp) { TranslationBlock *tb = p; int flags1, flags2; flags1 = page_get_flags(tb->pc); flags2 = page_get_flags(tb->pc + tb->size - 1); if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", (long)tb->pc, tb->size, flags1, flags2); } } /* verify that all the pages have correct rights for code */ static void tb_page_check(void) { qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); } #endif /* CONFIG_USER_ONLY */ /* * user-mode: call with mmap_lock held * !user-mode: call with @pd->lock held */ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) { TranslationBlock *tb1; uintptr_t *pprev; unsigned int n1; assert_page_locked(pd); pprev = &pd->first_tb; PAGE_FOR_EACH_TB(pd, tb1, n1) { if (tb1 == tb) { *pprev = tb1->page_next[n1]; return; } pprev = &tb1->page_next[n1]; } g_assert_not_reached(); } /* remove @orig from its @n_orig-th jump list */ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) { uintptr_t ptr, ptr_locked; TranslationBlock *dest; TranslationBlock *tb; uintptr_t *pprev; int n; /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); dest = (TranslationBlock *)(ptr & ~1); if (dest == NULL) { return; } qemu_spin_lock(&dest->jmp_lock); /* * While acquiring the lock, the jump might have been removed if the * destination TB was invalidated; check again. */ ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); if (ptr_locked != ptr) { qemu_spin_unlock(&dest->jmp_lock); /* * The only possibility is that the jump was unlinked via * tb_jump_unlink(dest). Seeing here another destination would be a bug, * because we set the LSB above. */ g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); return; } /* * We first acquired the lock, and since the destination pointer matches, * we know for sure that @orig is in the jmp list. */ pprev = &dest->jmp_list_head; TB_FOR_EACH_JMP(dest, tb, n) { if (tb == orig && n == n_orig) { *pprev = tb->jmp_list_next[n]; /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ qemu_spin_unlock(&dest->jmp_lock); return; } pprev = &tb->jmp_list_next[n]; } g_assert_not_reached(); } /* reset the jump entry 'n' of a TB so that it is not chained to another TB */ static inline void tb_reset_jump(TranslationBlock *tb, int n) { uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); tb_set_jmp_target(tb, n, addr); } /* remove any jumps to the TB */ static inline void tb_jmp_unlink(TranslationBlock *dest) { TranslationBlock *tb; int n; qemu_spin_lock(&dest->jmp_lock); TB_FOR_EACH_JMP(dest, tb, n) { tb_reset_jump(tb, n); qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); /* No need to clear the list entry; setting the dest ptr is enough */ } dest->jmp_list_head = (uintptr_t)NULL; qemu_spin_unlock(&dest->jmp_lock); } /* * In user-mode, call with mmap_lock held. * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' * locks held. 
/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
                tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif
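/*
 * Worked example (illustrative, not part of the build): with a 4 KiB target
 * page, a 7-byte TB starting at page offset 0xffc sets bits 0xffc..0xfff on
 * its first page (tb_end clamped to TARGET_PAGE_SIZE) and bits 0..2 on the
 * next page via the n == 1 case -- 4 + 3 = 7 bytes covered in total:
 *
 *     bitmap_set(page0_bitmap, 0xffc, TARGET_PAGE_SIZE - 0xffc);
 *     bitmap_set(page1_bitmap, 0, 3);
 */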
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    if (!(tb->cflags & CF_NOCACHE)) {
        void *existing_tb = NULL;
        uint32_t h;

        /* add in the hash table */
        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                         tb->trace_vcpu_dstate);
        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

        /* remove TB from the page(s) if we couldn't insert it */
        if (unlikely(existing_tb)) {
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
            if (p2) {
                tb_page_remove(p2, tb);
                invalidate_page_bitmap(p2);
            }
            tb = existing_tb;
        }
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}
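/*
 * Illustrative sketch (not part of this file): tb_link_page() implements an
 * "insert or adopt" pattern, so a hypothetical caller must always continue
 * with the returned TB, never the one it passed in:
 *
 *     TranslationBlock *tb = ...;                  // freshly generated
 *     tb = tb_link_page(tb, phys_pc, phys_page2);  // may return a rival's TB
 *     // from here on only 'tb' is valid; a losing copy was already unlinked
 */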
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a temporary TB with 1 insn in it */
        cflags &= ~CF_COUNT_MASK;
        cflags |= CF_NOCACHE | 1;
    }

    cflags &= ~CF_CLUSTER_MASK;
    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (cpu->singlestep_enabled || singlestep) {
        max_insns = 1;
    }

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->orig_tb = NULL;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(gen_code_size != 0)) {
        goto error_return;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    tcg_ctx->cpu = NULL;
    max_insns = tb->icount;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->tb_count, prof->tb_count + 1);
    qatomic_set(&prof->interm_time,
                prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
 error_return:
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code. All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
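            /*
             * Example of the retry sequence (illustrative): starting from
             * max_insns = 512, repeated -2 returns try 256, 128, ..., down
             * to 1; the assert below fires only if even a single insn
             * cannot be encoded, which would be a TCG backend bug.
             */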
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size;
        const tcg_target_ulong *rx_data_gen_ptr;
        size_t chunk_start;
        int insn = 0;

        if (tcg_ctx->data_gen_ptr) {
            rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
            code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            rx_data_gen_ptr = 0;
            code_size = gen_code_size;
            data_size = 0;
        }

        /* Dump header and the first instruction */
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
                 tcg_ctx->gen_insn_data[insn][0]);
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction. The whole array is offset so the
         * first entry is the beginning of the 2nd instruction.
         */
        while (insn < tb->icount) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
                         tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
                chunk_start = chunk_end;
            }
            insn++;
        }

        if (chunk_start < code_size) {
            qemu_log("  -- tb slow paths + alignment\n");
            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                qemu_log("0x%08" PRIxPTR ": .quad 0x%" TCG_PRIlx "\n",
                         (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tb_destroy(tb);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
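/*
 * Illustrative note (not part of the build): the overlap test above is the
 * standard half-open interval check -- [tb_start, tb_end[ intersects
 * [start, end[ iff neither range lies entirely before the other:
 *
 *     bool overlaps = !(tb_end <= start || tb_start >= end);
 *
 * Half-open ranges keep adjacent TBs from triggering spurious invalidations.
 */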
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
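/*
 * Worked example (illustrative): for an 8-byte write at page offset
 * nr = 0x100, the test above extracts bits 0x100..0x107 of code_bitmap;
 * (b & ((1 << len) - 1)) is nonzero iff any of those bytes belong to a
 * translated TB, in which case the slow invalidation path runs. Clean
 * writes to a page that merely contains code elsewhere stay on the
 * fast path.
 */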
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop its
               execution. We could be more precise by checking that
               the modification is after the current PC, but it would
               require a specialized function to partially restore
               the CPU state */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags();
        return true;
    }
#endif
    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /* Generate a new TB executing the I/O insn.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb_cflags(tb) & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tcg_tb_remove(tb);
        tb_destroy(tb);
    }

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "cpu_io_recompile: rewound execution of TB to "
                           TARGET_FMT_lx "\n", tb->pc);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     */
    cpu_loop_exit_noexc(cpu);
}
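/*
 * Illustrative note (not from this file): cflags_next_tb packs the insn
 * budget into the low CF_COUNT_MASK bits, so "curr_cflags() | CF_LAST_IO | n"
 * with n == 1 or 2 requests a TB of at most one or two insns whose final
 * insn is the one permitted to perform I/O in icount mode.
 */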
static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count            %zu\n", nb_tbs);
    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count      %u\n",
                qatomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes    %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info();
}

void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
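/*
 * Illustrative note (not from this file): setting icount_decr.u16.high to -1
 * drives the combined 32-bit counter negative, so the decrement-and-test at
 * the next TB boundary makes the vCPU leave translated code and notice
 * interrupt_request -- a cheap, lock-free kick of the execution loop.
 */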
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
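/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * counting executable pages with walk_memory_regions(), mirroring the
 * dump_region() signature above:
 *
 *     static int count_exec(void *priv, target_ulong start,
 *                           target_ulong end, unsigned long prot)
 *     {
 *         if (prot & PAGE_EXEC) {
 *             *(target_ulong *)priv += (end - start) >> TARGET_PAGE_BITS;
 *         }
 *         return 0;    // nonzero would abort the walk
 *     }
 *
 *     target_ulong n = 0;
 *     walk_memory_regions(&n, count_exec);
 */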
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data && p->target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
        }
        p->flags = flags;
    }
}

void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}

void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
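/*
 * Illustrative sketch (not part of this file): a syscall emulation layer
 * validating a guest buffer before reading it might do:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;   // hypothetical error path
 *     }
 *
 * Asking for PAGE_WRITE additionally unprotects pages that were made
 * read-only only because they contain translated code.
 */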
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must
                   invalidate the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}