[gdb] Fix data race in bcache::insert

Data race between:
...
  Read of size 8 at 0x7bb4000000d0 by thread T4:
    #0 gdb::bcache::insert(void const*, int, bool*) gdb/bcache.c:155
    #1 objfile_per_bfd_storage::intern(char const*) gdb/objfiles.h:250
    #2 objfile::intern(char const*) gdb/objfiles.h:475
    #3 dwarf2_canonicalize_name gdb/dwarf2/read.c:21904
    #4 dwarf2_name gdb/dwarf2/read.c:21999
    #5 read_base_type gdb/dwarf2/read.c:17092
    #6 read_type_die_1 gdb/dwarf2/read.c:21529
    #7 read_type_die gdb/dwarf2/read.c:21464
    #8 process_die gdb/dwarf2/read.c:8674
    #9 read_file_scope gdb/dwarf2/read.c:9610
    #10 process_die gdb/dwarf2/read.c:8614
    #11 process_full_comp_unit gdb/dwarf2/read.c:8383
    #12 process_queue_item gdb/dwarf2/read.c:7592
...
and:
...
  Previous write of size 8 at 0x7bb4000000d0 by main thread:
    #0 gdb::bcache::insert(void const*, int, bool*) gdb/bcache.c:167
    #1 objfile_per_bfd_storage::intern(std::__cxx11::basic_string<char,
    std::char_traits<char>, std::allocator<char> > const&) gdb/objfiles.h:257
    #2 objfile::intern(std::__cxx11::basic_string<char,
    std::char_traits<char>, std::allocator<char> > const&) <null>
    #3 dwarf2_compute_name gdb/dwarf2/read.c:9050
    #4 dwarf2_full_name gdb/dwarf2/read.c:9070
    #5 read_structure_type gdb/dwarf2/read.c:14558
    #6 process_structure_scope gdb/dwarf2/read.c:14847
    #7 process_die gdb/dwarf2/read.c:8643
    #8 read_file_scope gdb/dwarf2/read.c:9610
    #9 process_die gdb/dwarf2/read.c:8614
    #10 process_full_comp_unit gdb/dwarf2/read.c:8383
    #11 process_queue_item gdb/dwarf2/read.c:7592
...
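
The two traces show a DWARF worker thread (T4) and the main thread interning names into the same per-BFD string cache, so both reach bcache::insert on shared hash-table state without synchronization. Below is a minimal standalone sketch of that pattern and of the serialization the patch applies; the names are hypothetical and this is not gdb code, just an illustration of the lock-around-insert approach.

/* Minimal standalone sketch (hypothetical names, not gdb code): a
   shared intern-style cache whose insert mutates and may rehash common
   state.  Concurrent calls without the guard are the kind of data race
   ThreadSanitizer reports above; taking a mutex for the duration of the
   insert, as the patch below does for bcache::insert, serializes them.  */

#include <mutex>
#include <string>
#include <thread>
#include <unordered_set>

static std::mutex cache_lock;                  /* plays the role of bcache_lock  */
static std::unordered_set<std::string> cache;  /* plays the role of the bcache  */

static const std::string *
intern (const std::string &str)
{
  /* Without this guard, concurrent calls race on the hash table.  */
  std::lock_guard<std::mutex> guard (cache_lock);
  return &*cache.insert (str).first;
}

int
main ()
{
  std::thread worker ([] { intern ("int"); });  /* like thread T4  */
  intern ("struct foo");                        /* like the main thread  */
  worker.join ();
  return 0;
}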
Author: Tom de Vries
Date:   2022-07-15 17:45:53 +02:00
commit 2f8fd1da0e
parent b0a46c96e4


@@ -24,6 +24,11 @@
 #include "bcache.h"
 #include <algorithm>
+#if CXX_STD_THREAD
+#include <mutex>
+
+static std::mutex bcache_lock;
+#endif
 namespace gdb {
@@ -63,6 +68,9 @@ struct bstring
 void
 bcache::expand_hash_table ()
 {
+#if CXX_STD_THREAD
+  //std::lock_guard<std::mutex> guard (bcache_lock);
+#endif
   /* A table of good hash table sizes.  Whenever we grow, we pick the
      next larger size from this table.  sizes[i] is close to 1 << (i+10),
      so we roughly double the table size each time.  After we fall off
@@ -142,6 +150,9 @@ bcache::expand_hash_table ()
 const void *
 bcache::insert (const void *addr, int length, bool *added)
 {
+#if CXX_STD_THREAD
+  std::lock_guard<std::mutex> guard (bcache_lock);
+#endif
   unsigned long full_hash;
   unsigned short half_hash;
   int hash_index;
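
A note on the two hunks above: insert () takes bcache_lock through a std::lock_guard, so the lock is released on every return path of insert (). The guard inside expand_hash_table () is left commented out, presumably because that function is only reached from insert (), which already holds the lock, and std::mutex is not recursive, so locking again there would deadlock. A small sketch of that caller-holds-the-lock convention (hypothetical names, my reading of the patch rather than anything the commit states):

/* Sketch of the locking convention the patch appears to follow
   (hypothetical names): the outer function takes the non-recursive
   mutex, and the helper it calls relies on the caller already holding
   it.  Locking the same std::mutex again from the same thread would
   deadlock or be undefined behaviour, which is why the helper must not
   take the lock itself.  */

#include <mutex>

static std::mutex table_lock;

static void
expand_table ()
{
  /* Caller already holds table_lock -- do not lock it again here.  */
}

static void
insert_locked ()
{
  std::lock_guard<std::mutex> guard (table_lock);  /* released on return  */
  expand_table ();
}

int
main ()
{
  insert_locked ();
  return 0;
}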