# Copyright 1992-2024 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)

# Are we on a target board? As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.

require isnative

# Can the system run this test (in particular support sparse
# corefiles)? On systems that lack sparse corefile support this test
# consumes too many resources - gigabytes worth of disk space and
# I/O bandwidth.

if { [istarget "*-*-*bsd*"]
     || [istarget "*-*-solaris*"]
     || [istarget "*-*-darwin*"]
     || [istarget "*-*-cygwin*"] } {
    untested "kernel lacks sparse corefile support (PR gdb/1551)"
    return
}

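# standard_testfile sets srcfile and binfile from the test name; keep
# the generated core file in this test's output directory.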
standard_testfile .c

set corefile [standard_output_file ${binfile}.corefile]

if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
    untested "failed to compile"
    return -1
}

# Run GDB on the bigcore program up to where it will dump core.

clean_restart ${binfile}
gdb_test_no_output "set print sevenbit-strings"
gdb_test_no_output "set width 0"

# Get the core into the output directory.
set_inferior_cwd_to_output_dir

if {![runto_main]} {
    return 0
}
set print_core_line [gdb_get_line_number "Dump core"]
gdb_test "tbreak $print_core_line"
gdb_test continue ".*print_string.*"
gdb_test next ".*0 = 0.*"

# Traverse part of bigcore's linked list of memory chunks (forward or
# backward), saving each chunk's address.
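#
# For reference, the companion bigcore.c is expected to build its heap
# as a doubly linked list along the lines of:
#   struct list { struct list *next; struct list *prev; ... };
# "print heap.${dir}" prints the first chunk in the given direction,
# and each subsequent "print $.${dir}" follows the chain from the
# value just printed.
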
proc extract_heap { dir } {
    global gdb_prompt
    global expect_out
    set heap ""
    set test "extract ${dir} heap"
    set lim 0
    gdb_test_multiple "print heap.${dir}" "$test" {
        -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
            pass "$test"
        }
        -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
            set heap [concat $heap $expect_out(1,string)]
            # Cap the traversal at 200 entries.
            if { $lim >= 200 } {
                pass "$test (stop at $lim)"
            } else {
                incr lim
                send_gdb "print \$.${dir}\n"
                exp_continue
            }
        }
        -re ".*$gdb_prompt $" {
            fail "$test (entry $lim)"
        }
        timeout {
            fail "$test (timeout)"
        }
    }
    return $heap
}

set next_heap [extract_heap next]
set prev_heap [extract_heap prev]

# Save the total allocated size within GDB so that we can check
# the core size later.
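# bytes_allocated is assumed to be a global in bigcore.c that tracks
# the total number of heap bytes allocated; copying it into the
# convenience variable $bytes_allocated keeps the value available in
# GDB after the inferior has dumped core.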
gdb_test_no_output "set \$bytes_allocated = bytes_allocated" "save heap size"
|
|
|
|
# Now create a core dump
|
|
|
|
# Rename the core file to "TESTFILE.corefile" rather than just "core",
|
|
# to avoid problems with sys admin types that like to regularly prune
|
|
# all files named "core" from the system.
|
|
|
|
# Some systems append "core" to the name of the program; others append
|
|
# the name of the program to "core"; still others (like Linux, as of
|
|
# May 2003) create cores named "core.PID".
|
|
|
|
# Save the process ID. Some systems dump the core into core.PID.
|
|
set inferior_pid [get_inferior_pid]
|
|
|
|
# Dump core using SIGABRT
|
|
set oldtimeout $timeout
|
|
set timeout 600
|
|
gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"
|
|
set timeout $oldtimeout
|
|
|
|
# Find the corefile.
set file [find_core_file $inferior_pid]
if { $file != "" } {
    remote_exec build "mv $file $corefile"
} else {
    untested "can't generate a core file"
    return 0
}

# Check that the corefile is plausibly large enough.  We're trying to
# detect the case where the operating system has truncated the file
# just before signed wraparound.  TCL, unfortunately, has a similar
# problem - so use catch.  It can handle the "bad" size but not
# necessarily the "good" one.  And we must use GDB for the comparison,
# similarly.

if {[catch {file size $corefile} core_size] == 0} {
    set core_ok 0
    gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
        -re " = 1\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 1
        }
        -re " = 0\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 0
        }
    }
} else {
    # Probably failed due to the TCL build having problems with very
    # large values.  Since GDB uses a 64-bit off_t (when possible) it
    # shouldn't have this problem.  Assume that things are going to
    # work.  Without this assumption the test is skipped on systems
    # (such as i386 GNU/Linux with patched kernel) which do pass.
    pass "check core size"
    set core_ok 1
}
if {! $core_ok} {
    untested "check core size (system does not support large corefiles)"
    return 0
}

# Now load up that core file.

set test "load corefile"
gdb_test_multiple "core $corefile" "$test" {
    -re "A program is being debugged already. Kill it. .y or n. " {
        send_gdb "y\n"
        exp_continue
    }
    -re "Core was generated by.*$gdb_prompt $" {
        pass "$test"
    }
}

# Finally, re-traverse bigcore's linked list, checking each chunk's
# address against the executable.  Don't use gdb_test_multiple as we
# want only one pass/fail.  Don't use exp_continue as the regular
# expression involving $heap needs to be re-evaluated for each new
# response.

proc check_heap { dir heap } {
    global gdb_prompt
    set test "check ${dir} heap"
    set ok 1
    set lim 0
    send_gdb "print heap.${dir}\n"
    while { $ok } {
        gdb_expect {
            -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
                if { $lim >= [llength $heap] } {
                    pass "$test"
                    set ok 0
                } else {
                    incr lim
                    send_gdb "print \$.${dir}\n"
                }
            }
            -re ".*$gdb_prompt $" {
                fail "$test (address [lindex $heap $lim])"
                set ok 0
            }
            timeout {
                fail "$test (timeout)"
                set ok 0
            }
        }
    }
}

check_heap next $next_heap
check_heap prev $prev_heap